/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason. In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding). This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
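
/* Example of the encoding above (illustrative only): a test event for
 * channel 3 is _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, 3)
 * = (0x000101 << 8) | 3 = 0x010103, and _EFX_CHANNEL_MAGIC_CODE()
 * recovers 0x000101 from it by shifting the data byte back out.
 */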

static void efx_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
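
/* Sweep every bit selected by the mask (an oword is 128 bits), setting and
 * then clearing each testable bit in isolation, and verify that the value
 * read back matches under the same mask.
 */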

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range. It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_ATOMIC);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
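
/* Decide whether to use a descriptor push ("TX push"): writing the first new
 * descriptor together with the write pointer in a single register write saves
 * the NIC a descriptor fetch and so cuts latency.  It is only attempted when
 * the queue was last seen empty at the current write count and exactly one
 * descriptor is being added; empty_read_count carries the
 * EFX_EMPTY_COUNT_VALID flag recorded when the queue was observed empty (see
 * the TX completion path).
 */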

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
		&& tx_queue->write_count - write_count == 1;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{

	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}
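
/* Note: txd.entries below counts 4KB buffer-table pages (as set up by
 * efx_alloc_special_buffer()), not descriptors, so __ffs() yields the log2
 * page count that is programmed into the DESCQ_SIZE field.  The RX and
 * event-queue init functions below encode their ring sizes the same way.
 */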

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
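
/* Flush bookkeeping: drain_pending counts TX and RX queues that still have to
 * be drained, rxq_flush_pending counts RX queues waiting for a flush request
 * to be issued, and rxq_flush_outstanding counts flushes issued but not yet
 * completed (capped at EFX_RX_FLUSH_COUNT in flight).
 */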

/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_magic_event(channel,
						EFX_CHANNEL_MAGIC_TX_DRAIN(
							tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	efx->type->finish_flush(efx);

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
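
/* Example of the batching above (illustrative only): with read_count at 1000
 * and a completion event carrying DESC_PTR 1010, tx_packets is
 * (1010 - 1000) & ptr_mask = 10, and efx_xmit_done() then releases the
 * buffers up to that index.
 */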

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats. Ignore expected
	 * checksum errors during self-test.
	 */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message. FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * The "is multicast" and "matches multicast filter" flags can also be
 * used to discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_magic_event(tx_queue->channel,
					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}
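
/* Note on the qid decoding above: the TX queue number encodes
 * channel * EFX_TXQ_TYPES + type (as the bounds check assumes), so dividing
 * by EFX_TXQ_TYPES recovers the channel and the remainder selects the queue
 * type within that channel.
 */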

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_magic_event(efx_rx_queue_channel(rx_queue),
				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		rx_queue->enabled = false;
		efx_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Check whether an event is present in the eventq at the current
 * read pointer. Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	efx_magic_event(efx_rx_queue_channel(rx_queue),
			EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_nic_interrupts(efx, true, false);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
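
/* Note on the accounting above: internal error bursts are counted within a
 * rolling EFX_INT_ERROR_EXPIRE-second window; once EFX_MAX_INT_ERRORS are
 * seen in one window the NIC is disabled rather than reset again.
 */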

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours? If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel_irq(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt. This routine schedules event
 * queue processing. No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}


/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size. Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts. (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin.
				      */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
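	/* Editor's illustration (not in the original source): each entry in
	 * this array is built by the REGISTER_* macros above; for example
	 * REGISTER_AB(GM_ADR2) expands to
	 * { FR_AB_GM_ADR2, REGISTER_REVISION_A, REGISTER_REVISION_B },
	 * i.e. { FR_AB_GM_ADR2, 1, 2 }, which is what lets the dump code
	 * range-check efx->type->revision against [min_revision, max_revision].
	 */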
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version.
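	 *
	 * Worked example (editor's illustration, not from the original
	 * comment): each BUF_FULL_TBL(_KER) entry below declares step 8 and
	 * 1024 rows, and the dump code copies min(step, 16) bytes per row,
	 * so each of those tables contributes 1024 * 8 = 8192 bytes to the
	 * register dump.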
	 */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
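
/* Editor's note: efx_nic_get_regs_len() and efx_nic_get_regs() above are the
 * NIC-level half of the ethtool register-dump support.  Below is a minimal
 * sketch of how such helpers are typically wired to the ethtool core; it is
 * illustrative only, assumes <linux/netdevice.h> and <linux/ethtool.h> are
 * included, that netdev_priv() returns this device's struct efx_nic, and the
 * example_* names are hypothetical rather than taken from this driver.
 */
#if 0	/* illustrative sketch, not built as part of this driver */
static int example_ethtool_get_regs_len(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Tell the ethtool core how large a dump buffer to allocate */
	return efx_nic_get_regs_len(efx);
}

static void example_ethtool_get_regs(struct net_device *net_dev,
				     struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	/* Report the register layout revision and fill the buffer */
	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_regs_len	= example_ethtool_get_regs_len,
	.get_regs	= example_ethtool_get_regs,
};
#endif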