/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
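/* Worked example (added commentary, assuming queue/channel numbers fit
 * in the low 8 bits): for channel 3, EFX_CHANNEL_MAGIC_TEST() expands
 * to _EFX_CHANNEL_MAGIC(0x000101, 3) == (0x000101 << 8) | 3
 * == 0x00010103.  The receiving side recovers the code with
 * _EFX_CHANNEL_MAGIC_CODE(0x00010103) == 0x000101, and the low byte
 * identifies the channel or queue that generated the event.
 */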
/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}
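/* Explanatory note (added): for each register under test, the loop
 * above sweeps all 128 bit positions of the oword mask.  Every
 * maskable bit is first set on its own on top of the original value
 * and read back, then cleared on its own and read back; a masked
 * mismatch in either direction fails the self-test.  The original
 * register contents are restored before moving to the next register.
 */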
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
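/* Worked example (added commentary): a 512-entry descriptor ring of
 * 8-byte qwords needs 512 * 8 = 4096 bytes, so ALIGN() leaves len at
 * one EFX_BUF_SIZE (4KB) page and buffer->entries == 1; a 1024-entry
 * ring would consume two pages and two consecutive buffer table IDs.
 * Each page is later entered in the buffer table by its 4KB-aligned
 * DMA address (dma_addr >> 12) in efx_init_special_buffer() above.
 */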
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
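/* Explanatory note (added, assuming EFX_EMPTY_COUNT_VALID is a flag
 * bit ORed into the stored count, as the masking suggests):
 * empty_read_count records the write_count observed when the queue
 * was last seen empty, tagged with the VALID flag so that zero means
 * "no record".  XORing with the current write_count and masking off
 * the flag yields zero only if nothing has been written since the
 * queue emptied, i.e. the single descriptor being queued may safely
 * be pushed inline with the doorbell write.
 */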
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed.  Wait for the DRAIN events to be received so that
 * there are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->fc_disable++;
	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion).  If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	efx->fc_disable--;

	return rc;
}
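/* Explanatory note (added) on the accounting above: drain_pending is
 * bumped once per TX and once per RX queue before the flushes are
 * kicked off, and is decremented by efx_handle_drain_event() when the
 * matching TX_DRAIN/RX_DRAIN magic event arrives, so the wait loop
 * terminates only once every queue's drain has been observed (or the
 * 5s timeout expires and the counters are forcibly zeroed).
 */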
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
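/* Worked example (added commentary) of the completion arithmetic
 * above: with ptr_mask == 1023, read_count == 1020 and a completion
 * event carrying desc_ptr == 4, (4 - 1020) & 1023 == 8, i.e. eight
 * descriptors completed across the ring wrap.
 */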
/* Detect errors included in the rx_ev_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
}
/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);

		efx_magic_event(tx_queue->channel,
				EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_magic_event(efx_rx_queue_channel(rx_queue),
				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		/* ignore */
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		rx_queue->enabled = false;
		efx_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}
1171 " TX Q %d is disabled.\n", ev_sub_data, 1172 ev_sub_data); 1173 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 1174 } else 1175 efx_sriov_desc_fetch_err(efx, ev_sub_data); 1176 break; 1177 default: 1178 netif_vdbg(efx, hw, efx->net_dev, 1179 "channel %d unknown driver event code %d " 1180 "data %04x\n", channel->channel, ev_sub_code, 1181 ev_sub_data); 1182 break; 1183 } 1184 } 1185 1186 int efx_nic_process_eventq(struct efx_channel *channel, int budget) 1187 { 1188 struct efx_nic *efx = channel->efx; 1189 unsigned int read_ptr; 1190 efx_qword_t event, *p_event; 1191 int ev_code; 1192 int tx_packets = 0; 1193 int spent = 0; 1194 1195 read_ptr = channel->eventq_read_ptr; 1196 1197 for (;;) { 1198 p_event = efx_event(channel, read_ptr); 1199 event = *p_event; 1200 1201 if (!efx_event_present(&event)) 1202 /* End of events */ 1203 break; 1204 1205 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1206 "channel %d event is "EFX_QWORD_FMT"\n", 1207 channel->channel, EFX_QWORD_VAL(event)); 1208 1209 /* Clear this event by marking it all ones */ 1210 EFX_SET_QWORD(*p_event); 1211 1212 ++read_ptr; 1213 1214 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1215 1216 switch (ev_code) { 1217 case FSE_AZ_EV_CODE_RX_EV: 1218 efx_handle_rx_event(channel, &event); 1219 if (++spent == budget) 1220 goto out; 1221 break; 1222 case FSE_AZ_EV_CODE_TX_EV: 1223 tx_packets += efx_handle_tx_event(channel, &event); 1224 if (tx_packets > efx->txq_entries) { 1225 spent = budget; 1226 goto out; 1227 } 1228 break; 1229 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1230 efx_handle_generated_event(channel, &event); 1231 break; 1232 case FSE_AZ_EV_CODE_DRIVER_EV: 1233 efx_handle_driver_event(channel, &event); 1234 break; 1235 case FSE_CZ_EV_CODE_USER_EV: 1236 efx_sriov_event(channel, &event); 1237 break; 1238 case FSE_CZ_EV_CODE_MCDI_EV: 1239 efx_mcdi_process_event(channel, &event); 1240 break; 1241 case FSE_AZ_EV_CODE_GLOBAL_EV: 1242 if (efx->type->handle_global_event && 1243 efx->type->handle_global_event(channel, &event)) 1244 break; 1245 /* else fall through */ 1246 default: 1247 netif_err(channel->efx, hw, channel->efx->net_dev, 1248 "channel %d unknown event type %d (data " 1249 EFX_QWORD_FMT ")\n", channel->channel, 1250 ev_code, EFX_QWORD_VAL(event)); 1251 } 1252 } 1253 1254 out: 1255 channel->eventq_read_ptr = read_ptr; 1256 return spent; 1257 } 1258 1259 /* Check whether an event is present in the eventq at the current 1260 * read pointer. Only useful for self-test. 
/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_generate_test_event(struct efx_channel *channel)
{
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	efx_magic_event(efx_rx_queue_channel(rx_queue),
			EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_nic_interrupts(efx, true, false);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
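/* Added commentary: with EFX_MAX_INT_ERRORS == 5 and
 * EFX_INT_ERROR_EXPIRE == 3600, the logic above restarts the error
 * window once an hour has passed since the first error in it; five
 * fatal interrupts inside a single window escalate from
 * RESET_TYPE_INT_ERROR to RESET_TYPE_DISABLE, taking the NIC offline.
 */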
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel_irq(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
	}
}
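/* Explanatory note (added): the BUILD_BUG_ON above ties the size of
 * efx->rx_indir_table to FR_BZ_RX_INDIRECTION_TBL_ROWS.  Each row
 * holds one FRF_BZ_IT_QUEUE value, and the hardware indexes the table
 * with low-order bits of the RX flow hash, so spreading RX queue
 * numbers across the rows spreads flows across RX queues.
 */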
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
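/* Worked example (added commentary, assuming 4KB buffer table pages
 * as described for special buffers above): one 4096-entry DMA queue
 * of 8-byte descriptors costs 4096 * 8 / EFX_BUF_SIZE = 8 entries in
 * buftbl_min.  The descriptor caches are then carved from the top of
 * SRAM: TX_DC_ENTRIES (16) and RX_DC_ENTRIES (64) qwords per VI,
 * growing downwards from sram_lim_qw as computed above.
 */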
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
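/* Illustrative expansion (editor's sketch): REGISTER_AB(NIC_STAT), one of
 * the entries below, expands by token pasting as
 *
 *	REGISTER_AB(NIC_STAT)
 *	  -> REGISTER(NIC_STAT, A, B)
 *	  -> { FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B }
 *	  -> { FR_AB_NIC_STAT, 1, 2 }
 *
 * i.e. the FR_<min><max>_<name> offset constant comes from regs.h, and the
 * revision range tells the dump code which chip revisions expose the
 * register.
 */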
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
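/* Illustrative expansion (editor's sketch): REGISTER_TABLE_BB_CZ() emits two
 * array entries from one invocation, because revisions B and C/Z share an
 * offset and step but differ in row count.  For example,
 *
 *	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL)
 *
 * expands roughly to
 *
 *	{ FR_BZ_TX_DESC_PTR_TBL, 2, 2,
 *	  FR_BZ_TX_DESC_PTR_TBL_STEP, FR_BB_TX_DESC_PTR_TBL_ROWS },
 *	{ FR_BZ_TX_DESC_PTR_TBL, 3, 3,
 *	  FR_BZ_TX_DESC_PTR_TBL_STEP, FR_CZ_TX_DESC_PTR_TBL_ROWS },
 *
 * The bitfields in struct efx_nic_reg_table bound what can be described:
 * step:6 allows strides up to 63 bytes and rows:21 up to about 2M rows.
 */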
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit register or SRAM */
				efx_readd_table(efx, buf, table->offset, i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
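/* Illustrative usage (editor's sketch, not upstream code):
 * efx_nic_get_regs_len() and efx_nic_get_regs() are a matched pair,
 * typically wired up to ethtool's register-dump hooks.  A hypothetical
 * caller sizes the buffer with the former before calling the latter:
 *
 *	size_t len = efx_nic_get_regs_len(efx);
 *	void *buf = kzalloc(len, GFP_KERNEL);
 *
 *	if (buf) {
 *		efx_nic_get_regs(efx, buf);	// fills exactly 'len' bytes
 *		// ... hand the dump to userspace ...
 *		kfree(buf);
 *	}
 *
 * The two functions must walk the same tables with the same revision
 * filter, which is why both cap the per-row size with
 * min_t(size_t, table->step, 16).
 */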