1 /**************************************************************************** 2 * Driver for Solarflare Solarstorm network controllers and boards 3 * Copyright 2005-2006 Fen Systems Ltd. 4 * Copyright 2006-2011 Solarflare Communications Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 as published 8 * by the Free Software Foundation, incorporated herein by reference. 9 */ 10 11 #include <linux/bitops.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/pci.h> 15 #include <linux/module.h> 16 #include <linux/seq_file.h> 17 #include "net_driver.h" 18 #include "bitfield.h" 19 #include "efx.h" 20 #include "nic.h" 21 #include "regs.h" 22 #include "io.h" 23 #include "workarounds.h" 24 25 /************************************************************************** 26 * 27 * Configurable values 28 * 29 ************************************************************************** 30 */ 31 32 /* This is set to 16 for a good reason. In summary, if larger than 33 * 16, the descriptor cache holds more than a default socket 34 * buffer's worth of packets (for UDP we can only have at most one 35 * socket buffer's worth outstanding). This combined with the fact 36 * that we only get 1 TX event per descriptor cache means the NIC 37 * goes idle. 38 */ 39 #define TX_DC_ENTRIES 16 40 #define TX_DC_ENTRIES_ORDER 1 41 42 #define RX_DC_ENTRIES 64 43 #define RX_DC_ENTRIES_ORDER 3 44 45 /* If EFX_MAX_INT_ERRORS internal errors occur within 46 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 47 * disable it. 48 */ 49 #define EFX_INT_ERROR_EXPIRE 3600 50 #define EFX_MAX_INT_ERRORS 5 51 52 /* Depth of RX flush request fifo */ 53 #define EFX_RX_FLUSH_COUNT 4 54 55 /* Driver generated events */ 56 #define _EFX_CHANNEL_MAGIC_TEST 0x000101 57 #define _EFX_CHANNEL_MAGIC_FILL 0x000102 58 #define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103 59 #define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104 60 61 #define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data)) 62 #define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8) 63 64 #define EFX_CHANNEL_MAGIC_TEST(_channel) \ 65 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel) 66 #define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \ 67 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \ 68 efx_rx_queue_index(_rx_queue)) 69 #define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \ 70 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \ 71 efx_rx_queue_index(_rx_queue)) 72 #define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \ 73 _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \ 74 (_tx_queue)->queue) 75 76 /************************************************************************** 77 * 78 * Solarstorm hardware access 79 * 80 **************************************************************************/ 81 82 static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, 83 unsigned int index) 84 { 85 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, 86 value, index); 87 } 88 89 /* Read the current event from the event queue */ 90 static inline efx_qword_t *efx_event(struct efx_channel *channel, 91 unsigned int index) 92 { 93 return ((efx_qword_t *) (channel->eventq.addr)) + 94 (index & channel->eventq_mask); 95 } 96 97 /* See if an event is present 98 * 99 * We check both the high and low dword of the event for all ones. We 100 * wrote all ones when we cleared the event, and no valid event can 101 * have all ones in either its high or low dwords. This approach is 102 * robust against reordering. 103 * 104 * Note that using a single 64-bit comparison is incorrect; even 105 * though the CPU read will be atomic, the DMA write may not be. 106 */ 107 static inline int efx_event_present(efx_qword_t *event) 108 { 109 return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | 110 EFX_DWORD_IS_ALL_ONES(event->dword[1])); 111 } 112 113 static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, 114 const efx_oword_t *mask) 115 { 116 return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || 117 ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); 118 } 119 120 int efx_nic_test_registers(struct efx_nic *efx, 121 const struct efx_nic_register_test *regs, 122 size_t n_regs) 123 { 124 unsigned address = 0, i, j; 125 efx_oword_t mask, imask, original, reg, buf; 126 127 for (i = 0; i < n_regs; ++i) { 128 address = regs[i].address; 129 mask = imask = regs[i].mask; 130 EFX_INVERT_OWORD(imask); 131 132 efx_reado(efx, &original, address); 133 134 /* bit sweep on and off */ 135 for (j = 0; j < 128; j++) { 136 if (!EFX_EXTRACT_OWORD32(mask, j, j)) 137 continue; 138 139 /* Test this testable bit can be set in isolation */ 140 EFX_AND_OWORD(reg, original, mask); 141 EFX_SET_OWORD32(reg, j, j, 1); 142 143 efx_writeo(efx, ®, address); 144 efx_reado(efx, &buf, address); 145 146 if (efx_masked_compare_oword(®, &buf, &mask)) 147 goto fail; 148 149 /* Test this testable bit can be cleared in isolation */ 150 EFX_OR_OWORD(reg, original, mask); 151 EFX_SET_OWORD32(reg, j, j, 0); 152 153 efx_writeo(efx, ®, address); 154 efx_reado(efx, &buf, address); 155 156 if (efx_masked_compare_oword(®, &buf, &mask)) 157 goto fail; 158 } 159 160 efx_writeo(efx, &original, address); 161 } 162 163 return 0; 164 165 fail: 166 netif_err(efx, hw, efx->net_dev, 167 "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT 168 " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), 169 EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); 170 return -EIO; 171 } 172 173 /************************************************************************** 174 * 175 * Special buffer handling 176 * Special buffers are used for event queues and the TX and RX 177 * descriptor rings. 178 * 179 *************************************************************************/ 180 181 /* 182 * Initialise a special buffer 183 * 184 * This will define a buffer (previously allocated via 185 * efx_alloc_special_buffer()) in the buffer table, allowing 186 * it to be used for event queues, descriptor rings etc. 187 */ 188 static void 189 efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 190 { 191 efx_qword_t buf_desc; 192 unsigned int index; 193 dma_addr_t dma_addr; 194 int i; 195 196 EFX_BUG_ON_PARANOID(!buffer->addr); 197 198 /* Write buffer descriptors to NIC */ 199 for (i = 0; i < buffer->entries; i++) { 200 index = buffer->index + i; 201 dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE); 202 netif_dbg(efx, probe, efx->net_dev, 203 "mapping special buffer %d at %llx\n", 204 index, (unsigned long long)dma_addr); 205 EFX_POPULATE_QWORD_3(buf_desc, 206 FRF_AZ_BUF_ADR_REGION, 0, 207 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, 208 FRF_AZ_BUF_OWNER_ID_FBUF, 0); 209 efx_write_buf_tbl(efx, &buf_desc, index); 210 } 211 } 212 213 /* Unmaps a buffer and clears the buffer table entries */ 214 static void 215 efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 216 { 217 efx_oword_t buf_tbl_upd; 218 unsigned int start = buffer->index; 219 unsigned int end = (buffer->index + buffer->entries - 1); 220 221 if (!buffer->entries) 222 return; 223 224 netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", 225 buffer->index, buffer->index + buffer->entries - 1); 226 227 EFX_POPULATE_OWORD_4(buf_tbl_upd, 228 FRF_AZ_BUF_UPD_CMD, 0, 229 FRF_AZ_BUF_CLR_CMD, 1, 230 FRF_AZ_BUF_CLR_END_ID, end, 231 FRF_AZ_BUF_CLR_START_ID, start); 232 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); 233 } 234 235 /* 236 * Allocate a new special buffer 237 * 238 * This allocates memory for a new buffer, clears it and allocates a 239 * new buffer ID range. It does not write into the buffer table. 240 * 241 * This call will allocate 4KB buffers, since 8KB buffers can't be 242 * used for event queues and descriptor rings. 243 */ 244 static int efx_alloc_special_buffer(struct efx_nic *efx, 245 struct efx_special_buffer *buffer, 246 unsigned int len) 247 { 248 len = ALIGN(len, EFX_BUF_SIZE); 249 250 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 251 &buffer->dma_addr, GFP_KERNEL); 252 if (!buffer->addr) 253 return -ENOMEM; 254 buffer->len = len; 255 buffer->entries = len / EFX_BUF_SIZE; 256 BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); 257 258 /* All zeros is a potentially valid event so memset to 0xff */ 259 memset(buffer->addr, 0xff, len); 260 261 /* Select new buffer ID */ 262 buffer->index = efx->next_buffer_table; 263 efx->next_buffer_table += buffer->entries; 264 #ifdef CONFIG_SFC_SRIOV 265 BUG_ON(efx_sriov_enabled(efx) && 266 efx->vf_buftbl_base < efx->next_buffer_table); 267 #endif 268 269 netif_dbg(efx, probe, efx->net_dev, 270 "allocating special buffers %d-%d at %llx+%x " 271 "(virt %p phys %llx)\n", buffer->index, 272 buffer->index + buffer->entries - 1, 273 (u64)buffer->dma_addr, len, 274 buffer->addr, (u64)virt_to_phys(buffer->addr)); 275 276 return 0; 277 } 278 279 static void 280 efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) 281 { 282 if (!buffer->addr) 283 return; 284 285 netif_dbg(efx, hw, efx->net_dev, 286 "deallocating special buffers %d-%d at %llx+%x " 287 "(virt %p phys %llx)\n", buffer->index, 288 buffer->index + buffer->entries - 1, 289 (u64)buffer->dma_addr, buffer->len, 290 buffer->addr, (u64)virt_to_phys(buffer->addr)); 291 292 dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, 293 buffer->dma_addr); 294 buffer->addr = NULL; 295 buffer->entries = 0; 296 } 297 298 /************************************************************************** 299 * 300 * Generic buffer handling 301 * These buffers are used for interrupt status and MAC stats 302 * 303 **************************************************************************/ 304 305 int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, 306 unsigned int len) 307 { 308 buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, 309 &buffer->dma_addr, GFP_ATOMIC); 310 if (!buffer->addr) 311 return -ENOMEM; 312 buffer->len = len; 313 memset(buffer->addr, 0, len); 314 return 0; 315 } 316 317 void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) 318 { 319 if (buffer->addr) { 320 dma_free_coherent(&efx->pci_dev->dev, buffer->len, 321 buffer->addr, buffer->dma_addr); 322 buffer->addr = NULL; 323 } 324 } 325 326 /************************************************************************** 327 * 328 * TX path 329 * 330 **************************************************************************/ 331 332 /* Returns a pointer to the specified transmit descriptor in the TX 333 * descriptor queue belonging to the specified channel. 334 */ 335 static inline efx_qword_t * 336 efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) 337 { 338 return ((efx_qword_t *) (tx_queue->txd.addr)) + index; 339 } 340 341 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ 342 static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) 343 { 344 unsigned write_ptr; 345 efx_dword_t reg; 346 347 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 348 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); 349 efx_writed_page(tx_queue->efx, ®, 350 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); 351 } 352 353 /* Write pointer and first descriptor for TX descriptor ring */ 354 static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, 355 const efx_qword_t *txd) 356 { 357 unsigned write_ptr; 358 efx_oword_t reg; 359 360 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); 361 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); 362 363 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 364 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, 365 FRF_AZ_TX_DESC_WPTR, write_ptr); 366 reg.qword[0] = *txd; 367 efx_writeo_page(tx_queue->efx, ®, 368 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); 369 } 370 371 static inline bool 372 efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) 373 { 374 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); 375 376 if (empty_read_count == 0) 377 return false; 378 379 tx_queue->empty_read_count = 0; 380 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; 381 } 382 383 /* For each entry inserted into the software descriptor ring, create a 384 * descriptor in the hardware TX descriptor ring (in host memory), and 385 * write a doorbell. 386 */ 387 void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) 388 { 389 390 struct efx_tx_buffer *buffer; 391 efx_qword_t *txd; 392 unsigned write_ptr; 393 unsigned old_write_count = tx_queue->write_count; 394 395 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 396 397 do { 398 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; 399 buffer = &tx_queue->buffer[write_ptr]; 400 txd = efx_tx_desc(tx_queue, write_ptr); 401 ++tx_queue->write_count; 402 403 /* Create TX descriptor ring entry */ 404 EFX_POPULATE_QWORD_4(*txd, 405 FSF_AZ_TX_KER_CONT, buffer->continuation, 406 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, 407 FSF_AZ_TX_KER_BUF_REGION, 0, 408 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); 409 } while (tx_queue->write_count != tx_queue->insert_count); 410 411 wmb(); /* Ensure descriptors are written before they are fetched */ 412 413 if (efx_may_push_tx_desc(tx_queue, old_write_count)) { 414 txd = efx_tx_desc(tx_queue, 415 old_write_count & tx_queue->ptr_mask); 416 efx_push_tx_desc(tx_queue, txd); 417 ++tx_queue->pushes; 418 } else { 419 efx_notify_tx_desc(tx_queue); 420 } 421 } 422 423 /* Allocate hardware resources for a TX queue */ 424 int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) 425 { 426 struct efx_nic *efx = tx_queue->efx; 427 unsigned entries; 428 429 entries = tx_queue->ptr_mask + 1; 430 return efx_alloc_special_buffer(efx, &tx_queue->txd, 431 entries * sizeof(efx_qword_t)); 432 } 433 434 void efx_nic_init_tx(struct efx_tx_queue *tx_queue) 435 { 436 struct efx_nic *efx = tx_queue->efx; 437 efx_oword_t reg; 438 439 /* Pin TX descriptor ring */ 440 efx_init_special_buffer(efx, &tx_queue->txd); 441 442 /* Push TX descriptor ring to card */ 443 EFX_POPULATE_OWORD_10(reg, 444 FRF_AZ_TX_DESCQ_EN, 1, 445 FRF_AZ_TX_ISCSI_DDIG_EN, 0, 446 FRF_AZ_TX_ISCSI_HDIG_EN, 0, 447 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 448 FRF_AZ_TX_DESCQ_EVQ_ID, 449 tx_queue->channel->channel, 450 FRF_AZ_TX_DESCQ_OWNER_ID, 0, 451 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, 452 FRF_AZ_TX_DESCQ_SIZE, 453 __ffs(tx_queue->txd.entries), 454 FRF_AZ_TX_DESCQ_TYPE, 0, 455 FRF_BZ_TX_NON_IP_DROP_DIS, 1); 456 457 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 458 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; 459 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 460 EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, 461 !csum); 462 } 463 464 efx_writeo_table(efx, ®, efx->type->txd_ptr_tbl_base, 465 tx_queue->queue); 466 467 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { 468 /* Only 128 bits in this register */ 469 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); 470 471 efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); 472 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) 473 clear_bit_le(tx_queue->queue, (void *)®); 474 else 475 set_bit_le(tx_queue->queue, (void *)®); 476 efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); 477 } 478 479 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 480 EFX_POPULATE_OWORD_1(reg, 481 FRF_BZ_TX_PACE, 482 (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? 483 FFE_BZ_TX_PACE_OFF : 484 FFE_BZ_TX_PACE_RESERVED); 485 efx_writeo_table(efx, ®, FR_BZ_TX_PACE_TBL, 486 tx_queue->queue); 487 } 488 } 489 490 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) 491 { 492 struct efx_nic *efx = tx_queue->efx; 493 efx_oword_t tx_flush_descq; 494 495 EFX_POPULATE_OWORD_2(tx_flush_descq, 496 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, 497 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); 498 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); 499 } 500 501 void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) 502 { 503 struct efx_nic *efx = tx_queue->efx; 504 efx_oword_t tx_desc_ptr; 505 506 /* Remove TX descriptor ring from card */ 507 EFX_ZERO_OWORD(tx_desc_ptr); 508 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 509 tx_queue->queue); 510 511 /* Unpin TX descriptor ring */ 512 efx_fini_special_buffer(efx, &tx_queue->txd); 513 } 514 515 /* Free buffers backing TX queue */ 516 void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) 517 { 518 efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); 519 } 520 521 /************************************************************************** 522 * 523 * RX path 524 * 525 **************************************************************************/ 526 527 /* Returns a pointer to the specified descriptor in the RX descriptor queue */ 528 static inline efx_qword_t * 529 efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) 530 { 531 return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; 532 } 533 534 /* This creates an entry in the RX descriptor queue */ 535 static inline void 536 efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) 537 { 538 struct efx_rx_buffer *rx_buf; 539 efx_qword_t *rxd; 540 541 rxd = efx_rx_desc(rx_queue, index); 542 rx_buf = efx_rx_buffer(rx_queue, index); 543 EFX_POPULATE_QWORD_3(*rxd, 544 FSF_AZ_RX_KER_BUF_SIZE, 545 rx_buf->len - 546 rx_queue->efx->type->rx_buffer_padding, 547 FSF_AZ_RX_KER_BUF_REGION, 0, 548 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); 549 } 550 551 /* This writes to the RX_DESC_WPTR register for the specified receive 552 * descriptor ring. 553 */ 554 void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) 555 { 556 struct efx_nic *efx = rx_queue->efx; 557 efx_dword_t reg; 558 unsigned write_ptr; 559 560 while (rx_queue->notified_count != rx_queue->added_count) { 561 efx_build_rx_desc( 562 rx_queue, 563 rx_queue->notified_count & rx_queue->ptr_mask); 564 ++rx_queue->notified_count; 565 } 566 567 wmb(); 568 write_ptr = rx_queue->added_count & rx_queue->ptr_mask; 569 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); 570 efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, 571 efx_rx_queue_index(rx_queue)); 572 } 573 574 int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) 575 { 576 struct efx_nic *efx = rx_queue->efx; 577 unsigned entries; 578 579 entries = rx_queue->ptr_mask + 1; 580 return efx_alloc_special_buffer(efx, &rx_queue->rxd, 581 entries * sizeof(efx_qword_t)); 582 } 583 584 void efx_nic_init_rx(struct efx_rx_queue *rx_queue) 585 { 586 efx_oword_t rx_desc_ptr; 587 struct efx_nic *efx = rx_queue->efx; 588 bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; 589 bool iscsi_digest_en = is_b0; 590 591 netif_dbg(efx, hw, efx->net_dev, 592 "RX queue %d ring in special buffers %d-%d\n", 593 efx_rx_queue_index(rx_queue), rx_queue->rxd.index, 594 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 595 596 /* Pin RX descriptor ring */ 597 efx_init_special_buffer(efx, &rx_queue->rxd); 598 599 /* Push RX descriptor ring to card */ 600 EFX_POPULATE_OWORD_10(rx_desc_ptr, 601 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, 602 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, 603 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 604 FRF_AZ_RX_DESCQ_EVQ_ID, 605 efx_rx_queue_channel(rx_queue)->channel, 606 FRF_AZ_RX_DESCQ_OWNER_ID, 0, 607 FRF_AZ_RX_DESCQ_LABEL, 608 efx_rx_queue_index(rx_queue), 609 FRF_AZ_RX_DESCQ_SIZE, 610 __ffs(rx_queue->rxd.entries), 611 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , 612 /* For >=B0 this is scatter so disable */ 613 FRF_AZ_RX_DESCQ_JUMBO, !is_b0, 614 FRF_AZ_RX_DESCQ_EN, 1); 615 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 616 efx_rx_queue_index(rx_queue)); 617 } 618 619 static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) 620 { 621 struct efx_nic *efx = rx_queue->efx; 622 efx_oword_t rx_flush_descq; 623 624 EFX_POPULATE_OWORD_2(rx_flush_descq, 625 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, 626 FRF_AZ_RX_FLUSH_DESCQ, 627 efx_rx_queue_index(rx_queue)); 628 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); 629 } 630 631 void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) 632 { 633 efx_oword_t rx_desc_ptr; 634 struct efx_nic *efx = rx_queue->efx; 635 636 /* Remove RX descriptor ring from card */ 637 EFX_ZERO_OWORD(rx_desc_ptr); 638 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 639 efx_rx_queue_index(rx_queue)); 640 641 /* Unpin RX descriptor ring */ 642 efx_fini_special_buffer(efx, &rx_queue->rxd); 643 } 644 645 /* Free buffers backing RX queue */ 646 void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) 647 { 648 efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); 649 } 650 651 /************************************************************************** 652 * 653 * Flush handling 654 * 655 **************************************************************************/ 656 657 /* efx_nic_flush_queues() must be woken up when all flushes are completed, 658 * or more RX flushes can be kicked off. 659 */ 660 static bool efx_flush_wake(struct efx_nic *efx) 661 { 662 /* Ensure that all updates are visible to efx_nic_flush_queues() */ 663 smp_mb(); 664 665 return (atomic_read(&efx->drain_pending) == 0 || 666 (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT 667 && atomic_read(&efx->rxq_flush_pending) > 0)); 668 } 669 670 /* Flush all the transmit queues, and continue flushing receive queues until 671 * they're all flushed. Wait for the DRAIN events to be recieved so that there 672 * are no more RX and TX events left on any channel. */ 673 int efx_nic_flush_queues(struct efx_nic *efx) 674 { 675 unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */ 676 struct efx_channel *channel; 677 struct efx_rx_queue *rx_queue; 678 struct efx_tx_queue *tx_queue; 679 int rc = 0; 680 681 efx->fc_disable++; 682 efx->type->prepare_flush(efx); 683 684 efx_for_each_channel(channel, efx) { 685 efx_for_each_channel_tx_queue(tx_queue, channel) { 686 atomic_inc(&efx->drain_pending); 687 efx_flush_tx_queue(tx_queue); 688 } 689 efx_for_each_channel_rx_queue(rx_queue, channel) { 690 atomic_inc(&efx->drain_pending); 691 rx_queue->flush_pending = true; 692 atomic_inc(&efx->rxq_flush_pending); 693 } 694 } 695 696 while (timeout && atomic_read(&efx->drain_pending) > 0) { 697 /* If SRIOV is enabled, then offload receive queue flushing to 698 * the firmware (though we will still have to poll for 699 * completion). If that fails, fall back to the old scheme. 700 */ 701 if (efx_sriov_enabled(efx)) { 702 rc = efx_mcdi_flush_rxqs(efx); 703 if (!rc) 704 goto wait; 705 } 706 707 /* The hardware supports four concurrent rx flushes, each of 708 * which may need to be retried if there is an outstanding 709 * descriptor fetch 710 */ 711 efx_for_each_channel(channel, efx) { 712 efx_for_each_channel_rx_queue(rx_queue, channel) { 713 if (atomic_read(&efx->rxq_flush_outstanding) >= 714 EFX_RX_FLUSH_COUNT) 715 break; 716 717 if (rx_queue->flush_pending) { 718 rx_queue->flush_pending = false; 719 atomic_dec(&efx->rxq_flush_pending); 720 atomic_inc(&efx->rxq_flush_outstanding); 721 efx_flush_rx_queue(rx_queue); 722 } 723 } 724 } 725 726 wait: 727 timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx), 728 timeout); 729 } 730 731 if (atomic_read(&efx->drain_pending)) { 732 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues " 733 "(rx %d+%d)\n", atomic_read(&efx->drain_pending), 734 atomic_read(&efx->rxq_flush_outstanding), 735 atomic_read(&efx->rxq_flush_pending)); 736 rc = -ETIMEDOUT; 737 738 atomic_set(&efx->drain_pending, 0); 739 atomic_set(&efx->rxq_flush_pending, 0); 740 atomic_set(&efx->rxq_flush_outstanding, 0); 741 } 742 743 efx->fc_disable--; 744 745 return rc; 746 } 747 748 /************************************************************************** 749 * 750 * Event queue processing 751 * Event queues are processed by per-channel tasklets. 752 * 753 **************************************************************************/ 754 755 /* Update a channel's event queue's read pointer (RPTR) register 756 * 757 * This writes the EVQ_RPTR_REG register for the specified channel's 758 * event queue. 759 */ 760 void efx_nic_eventq_read_ack(struct efx_channel *channel) 761 { 762 efx_dword_t reg; 763 struct efx_nic *efx = channel->efx; 764 765 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, 766 channel->eventq_read_ptr & channel->eventq_mask); 767 efx_writed_table(efx, ®, efx->type->evq_rptr_tbl_base, 768 channel->channel); 769 } 770 771 /* Use HW to insert a SW defined event */ 772 void efx_generate_event(struct efx_nic *efx, unsigned int evq, 773 efx_qword_t *event) 774 { 775 efx_oword_t drv_ev_reg; 776 777 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || 778 FRF_AZ_DRV_EV_DATA_WIDTH != 64); 779 drv_ev_reg.u32[0] = event->u32[0]; 780 drv_ev_reg.u32[1] = event->u32[1]; 781 drv_ev_reg.u32[2] = 0; 782 drv_ev_reg.u32[3] = 0; 783 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq); 784 efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV); 785 } 786 787 static void efx_magic_event(struct efx_channel *channel, u32 magic) 788 { 789 efx_qword_t event; 790 791 EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE, 792 FSE_AZ_EV_CODE_DRV_GEN_EV, 793 FSF_AZ_DRV_GEN_EV_MAGIC, magic); 794 efx_generate_event(channel->efx, channel->channel, &event); 795 } 796 797 /* Handle a transmit completion event 798 * 799 * The NIC batches TX completion events; the message we receive is of 800 * the form "complete all TX events up to this index". 801 */ 802 static int 803 efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 804 { 805 unsigned int tx_ev_desc_ptr; 806 unsigned int tx_ev_q_label; 807 struct efx_tx_queue *tx_queue; 808 struct efx_nic *efx = channel->efx; 809 int tx_packets = 0; 810 811 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 812 return 0; 813 814 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 815 /* Transmit completion */ 816 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 817 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 818 tx_queue = efx_channel_get_tx_queue( 819 channel, tx_ev_q_label % EFX_TXQ_TYPES); 820 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & 821 tx_queue->ptr_mask); 822 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 823 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 824 /* Rewrite the FIFO write pointer */ 825 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 826 tx_queue = efx_channel_get_tx_queue( 827 channel, tx_ev_q_label % EFX_TXQ_TYPES); 828 829 netif_tx_lock(efx->net_dev); 830 efx_notify_tx_desc(tx_queue); 831 netif_tx_unlock(efx->net_dev); 832 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && 833 EFX_WORKAROUND_10727(efx)) { 834 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 835 } else { 836 netif_err(efx, tx_err, efx->net_dev, 837 "channel %d unexpected TX event " 838 EFX_QWORD_FMT"\n", channel->channel, 839 EFX_QWORD_VAL(*event)); 840 } 841 842 return tx_packets; 843 } 844 845 /* Detect errors included in the rx_evt_pkt_ok bit. */ 846 static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, 847 const efx_qword_t *event) 848 { 849 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 850 struct efx_nic *efx = rx_queue->efx; 851 bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; 852 bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; 853 bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; 854 bool rx_ev_other_err, rx_ev_pause_frm; 855 bool rx_ev_hdr_type, rx_ev_mcast_pkt; 856 unsigned rx_ev_pkt_type; 857 858 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 859 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 860 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); 861 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); 862 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 863 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); 864 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 865 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); 866 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 867 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); 868 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); 869 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); 870 rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? 871 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); 872 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); 873 874 /* Every error apart from tobe_disc and pause_frm */ 875 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 876 rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | 877 rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); 878 879 /* Count errors that are not in MAC stats. Ignore expected 880 * checksum errors during self-test. */ 881 if (rx_ev_frm_trunc) 882 ++channel->n_rx_frm_trunc; 883 else if (rx_ev_tobe_disc) 884 ++channel->n_rx_tobe_disc; 885 else if (!efx->loopback_selftest) { 886 if (rx_ev_ip_hdr_chksum_err) 887 ++channel->n_rx_ip_hdr_chksum_err; 888 else if (rx_ev_tcp_udp_chksum_err) 889 ++channel->n_rx_tcp_udp_chksum_err; 890 } 891 892 /* TOBE_DISC is expected on unicast mismatches; don't print out an 893 * error message. FRM_TRUNC indicates RXDP dropped the packet due 894 * to a FIFO overflow. 895 */ 896 #ifdef DEBUG 897 if (rx_ev_other_err && net_ratelimit()) { 898 netif_dbg(efx, rx_err, efx->net_dev, 899 " RX queue %d unexpected RX event " 900 EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", 901 efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), 902 rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", 903 rx_ev_ip_hdr_chksum_err ? 904 " [IP_HDR_CHKSUM_ERR]" : "", 905 rx_ev_tcp_udp_chksum_err ? 906 " [TCP_UDP_CHKSUM_ERR]" : "", 907 rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", 908 rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", 909 rx_ev_drib_nib ? " [DRIB_NIB]" : "", 910 rx_ev_tobe_disc ? " [TOBE_DISC]" : "", 911 rx_ev_pause_frm ? " [PAUSE]" : ""); 912 } 913 #endif 914 915 /* The frame must be discarded if any of these are true. */ 916 return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | 917 rx_ev_tobe_disc | rx_ev_pause_frm) ? 918 EFX_RX_PKT_DISCARD : 0; 919 } 920 921 /* Handle receive events that are not in-order. */ 922 static void 923 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) 924 { 925 struct efx_nic *efx = rx_queue->efx; 926 unsigned expected, dropped; 927 928 expected = rx_queue->removed_count & rx_queue->ptr_mask; 929 dropped = (index - expected) & rx_queue->ptr_mask; 930 netif_info(efx, rx_err, efx->net_dev, 931 "dropped %d events (index=%d expected=%d)\n", 932 dropped, index, expected); 933 934 efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? 935 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); 936 } 937 938 /* Handle a packet received event 939 * 940 * The NIC gives a "discard" flag if it's a unicast packet with the 941 * wrong destination address 942 * Also "is multicast" and "matches multicast filter" flags can be used to 943 * discard non-matching multicast packets. 944 */ 945 static void 946 efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) 947 { 948 unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; 949 unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; 950 unsigned expected_ptr; 951 bool rx_ev_pkt_ok; 952 u16 flags; 953 struct efx_rx_queue *rx_queue; 954 struct efx_nic *efx = channel->efx; 955 956 if (unlikely(ACCESS_ONCE(efx->reset_pending))) 957 return; 958 959 /* Basic packet information */ 960 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); 961 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); 962 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); 963 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); 964 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); 965 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != 966 channel->channel); 967 968 rx_queue = efx_channel_get_rx_queue(channel); 969 970 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); 971 expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; 972 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 973 efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 974 975 if (likely(rx_ev_pkt_ok)) { 976 /* If packet is marked as OK and packet type is TCP/IP or 977 * UDP/IP, then we can rely on the hardware checksum. 978 */ 979 flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || 980 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ? 981 EFX_RX_PKT_CSUMMED : 0; 982 } else { 983 flags = efx_handle_rx_not_ok(rx_queue, event); 984 } 985 986 /* Detect multicast packets that didn't match the filter */ 987 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); 988 if (rx_ev_mcast_pkt) { 989 unsigned int rx_ev_mcast_hash_match = 990 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); 991 992 if (unlikely(!rx_ev_mcast_hash_match)) { 993 ++channel->n_rx_mcast_mismatch; 994 flags |= EFX_RX_PKT_DISCARD; 995 } 996 } 997 998 channel->irq_mod_score += 2; 999 1000 /* Handle received packet */ 1001 efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags); 1002 } 1003 1004 /* If this flush done event corresponds to a &struct efx_tx_queue, then 1005 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue 1006 * of all transmit completions. 1007 */ 1008 static void 1009 efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event) 1010 { 1011 struct efx_tx_queue *tx_queue; 1012 int qid; 1013 1014 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1015 if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) { 1016 tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES, 1017 qid % EFX_TXQ_TYPES); 1018 1019 efx_magic_event(tx_queue->channel, 1020 EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue)); 1021 } 1022 } 1023 1024 /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush 1025 * was succesful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add 1026 * the RX queue back to the mask of RX queues in need of flushing. 1027 */ 1028 static void 1029 efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) 1030 { 1031 struct efx_channel *channel; 1032 struct efx_rx_queue *rx_queue; 1033 int qid; 1034 bool failed; 1035 1036 qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1037 failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1038 if (qid >= efx->n_channels) 1039 return; 1040 channel = efx_get_channel(efx, qid); 1041 if (!efx_channel_has_rx_queue(channel)) 1042 return; 1043 rx_queue = efx_channel_get_rx_queue(channel); 1044 1045 if (failed) { 1046 netif_info(efx, hw, efx->net_dev, 1047 "RXQ %d flush retry\n", qid); 1048 rx_queue->flush_pending = true; 1049 atomic_inc(&efx->rxq_flush_pending); 1050 } else { 1051 efx_magic_event(efx_rx_queue_channel(rx_queue), 1052 EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); 1053 } 1054 atomic_dec(&efx->rxq_flush_outstanding); 1055 if (efx_flush_wake(efx)) 1056 wake_up(&efx->flush_wq); 1057 } 1058 1059 static void 1060 efx_handle_drain_event(struct efx_channel *channel) 1061 { 1062 struct efx_nic *efx = channel->efx; 1063 1064 WARN_ON(atomic_read(&efx->drain_pending) == 0); 1065 atomic_dec(&efx->drain_pending); 1066 if (efx_flush_wake(efx)) 1067 wake_up(&efx->flush_wq); 1068 } 1069 1070 static void 1071 efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) 1072 { 1073 struct efx_nic *efx = channel->efx; 1074 struct efx_rx_queue *rx_queue = 1075 efx_channel_has_rx_queue(channel) ? 1076 efx_channel_get_rx_queue(channel) : NULL; 1077 unsigned magic, code; 1078 1079 magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 1080 code = _EFX_CHANNEL_MAGIC_CODE(magic); 1081 1082 if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { 1083 channel->event_test_cpu = raw_smp_processor_id(); 1084 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { 1085 /* The queue must be empty, so we won't receive any rx 1086 * events, so efx_process_channel() won't refill the 1087 * queue. Refill it here */ 1088 efx_fast_push_rx_descriptors(rx_queue); 1089 } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { 1090 rx_queue->enabled = false; 1091 efx_handle_drain_event(channel); 1092 } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { 1093 efx_handle_drain_event(channel); 1094 } else { 1095 netif_dbg(efx, hw, efx->net_dev, "channel %d received " 1096 "generated event "EFX_QWORD_FMT"\n", 1097 channel->channel, EFX_QWORD_VAL(*event)); 1098 } 1099 } 1100 1101 static void 1102 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 1103 { 1104 struct efx_nic *efx = channel->efx; 1105 unsigned int ev_sub_code; 1106 unsigned int ev_sub_data; 1107 1108 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); 1109 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1110 1111 switch (ev_sub_code) { 1112 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 1113 netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 1114 channel->channel, ev_sub_data); 1115 efx_handle_tx_flush_done(efx, event); 1116 efx_sriov_tx_flush_done(efx, event); 1117 break; 1118 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 1119 netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 1120 channel->channel, ev_sub_data); 1121 efx_handle_rx_flush_done(efx, event); 1122 efx_sriov_rx_flush_done(efx, event); 1123 break; 1124 case FSE_AZ_EVQ_INIT_DONE_EV: 1125 netif_dbg(efx, hw, efx->net_dev, 1126 "channel %d EVQ %d initialised\n", 1127 channel->channel, ev_sub_data); 1128 break; 1129 case FSE_AZ_SRM_UPD_DONE_EV: 1130 netif_vdbg(efx, hw, efx->net_dev, 1131 "channel %d SRAM update done\n", channel->channel); 1132 break; 1133 case FSE_AZ_WAKE_UP_EV: 1134 netif_vdbg(efx, hw, efx->net_dev, 1135 "channel %d RXQ %d wakeup event\n", 1136 channel->channel, ev_sub_data); 1137 break; 1138 case FSE_AZ_TIMER_EV: 1139 netif_vdbg(efx, hw, efx->net_dev, 1140 "channel %d RX queue %d timer expired\n", 1141 channel->channel, ev_sub_data); 1142 break; 1143 case FSE_AA_RX_RECOVER_EV: 1144 netif_err(efx, rx_err, efx->net_dev, 1145 "channel %d seen DRIVER RX_RESET event. " 1146 "Resetting.\n", channel->channel); 1147 atomic_inc(&efx->rx_reset); 1148 efx_schedule_reset(efx, 1149 EFX_WORKAROUND_6555(efx) ? 1150 RESET_TYPE_RX_RECOVERY : 1151 RESET_TYPE_DISABLE); 1152 break; 1153 case FSE_BZ_RX_DSC_ERROR_EV: 1154 if (ev_sub_data < EFX_VI_BASE) { 1155 netif_err(efx, rx_err, efx->net_dev, 1156 "RX DMA Q %d reports descriptor fetch error." 1157 " RX Q %d is disabled.\n", ev_sub_data, 1158 ev_sub_data); 1159 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 1160 } else 1161 efx_sriov_desc_fetch_err(efx, ev_sub_data); 1162 break; 1163 case FSE_BZ_TX_DSC_ERROR_EV: 1164 if (ev_sub_data < EFX_VI_BASE) { 1165 netif_err(efx, tx_err, efx->net_dev, 1166 "TX DMA Q %d reports descriptor fetch error." 1167 " TX Q %d is disabled.\n", ev_sub_data, 1168 ev_sub_data); 1169 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 1170 } else 1171 efx_sriov_desc_fetch_err(efx, ev_sub_data); 1172 break; 1173 default: 1174 netif_vdbg(efx, hw, efx->net_dev, 1175 "channel %d unknown driver event code %d " 1176 "data %04x\n", channel->channel, ev_sub_code, 1177 ev_sub_data); 1178 break; 1179 } 1180 } 1181 1182 int efx_nic_process_eventq(struct efx_channel *channel, int budget) 1183 { 1184 struct efx_nic *efx = channel->efx; 1185 unsigned int read_ptr; 1186 efx_qword_t event, *p_event; 1187 int ev_code; 1188 int tx_packets = 0; 1189 int spent = 0; 1190 1191 read_ptr = channel->eventq_read_ptr; 1192 1193 for (;;) { 1194 p_event = efx_event(channel, read_ptr); 1195 event = *p_event; 1196 1197 if (!efx_event_present(&event)) 1198 /* End of events */ 1199 break; 1200 1201 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1202 "channel %d event is "EFX_QWORD_FMT"\n", 1203 channel->channel, EFX_QWORD_VAL(event)); 1204 1205 /* Clear this event by marking it all ones */ 1206 EFX_SET_QWORD(*p_event); 1207 1208 ++read_ptr; 1209 1210 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1211 1212 switch (ev_code) { 1213 case FSE_AZ_EV_CODE_RX_EV: 1214 efx_handle_rx_event(channel, &event); 1215 if (++spent == budget) 1216 goto out; 1217 break; 1218 case FSE_AZ_EV_CODE_TX_EV: 1219 tx_packets += efx_handle_tx_event(channel, &event); 1220 if (tx_packets > efx->txq_entries) { 1221 spent = budget; 1222 goto out; 1223 } 1224 break; 1225 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1226 efx_handle_generated_event(channel, &event); 1227 break; 1228 case FSE_AZ_EV_CODE_DRIVER_EV: 1229 efx_handle_driver_event(channel, &event); 1230 break; 1231 case FSE_CZ_EV_CODE_USER_EV: 1232 efx_sriov_event(channel, &event); 1233 break; 1234 case FSE_CZ_EV_CODE_MCDI_EV: 1235 efx_mcdi_process_event(channel, &event); 1236 break; 1237 case FSE_AZ_EV_CODE_GLOBAL_EV: 1238 if (efx->type->handle_global_event && 1239 efx->type->handle_global_event(channel, &event)) 1240 break; 1241 /* else fall through */ 1242 default: 1243 netif_err(channel->efx, hw, channel->efx->net_dev, 1244 "channel %d unknown event type %d (data " 1245 EFX_QWORD_FMT ")\n", channel->channel, 1246 ev_code, EFX_QWORD_VAL(event)); 1247 } 1248 } 1249 1250 out: 1251 channel->eventq_read_ptr = read_ptr; 1252 return spent; 1253 } 1254 1255 /* Check whether an event is present in the eventq at the current 1256 * read pointer. Only useful for self-test. 1257 */ 1258 bool efx_nic_event_present(struct efx_channel *channel) 1259 { 1260 return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); 1261 } 1262 1263 /* Allocate buffer table entries for event queue */ 1264 int efx_nic_probe_eventq(struct efx_channel *channel) 1265 { 1266 struct efx_nic *efx = channel->efx; 1267 unsigned entries; 1268 1269 entries = channel->eventq_mask + 1; 1270 return efx_alloc_special_buffer(efx, &channel->eventq, 1271 entries * sizeof(efx_qword_t)); 1272 } 1273 1274 void efx_nic_init_eventq(struct efx_channel *channel) 1275 { 1276 efx_oword_t reg; 1277 struct efx_nic *efx = channel->efx; 1278 1279 netif_dbg(efx, hw, efx->net_dev, 1280 "channel %d event queue in special buffers %d-%d\n", 1281 channel->channel, channel->eventq.index, 1282 channel->eventq.index + channel->eventq.entries - 1); 1283 1284 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1285 EFX_POPULATE_OWORD_3(reg, 1286 FRF_CZ_TIMER_Q_EN, 1, 1287 FRF_CZ_HOST_NOTIFY_MODE, 0, 1288 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1289 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1290 } 1291 1292 /* Pin event queue buffer */ 1293 efx_init_special_buffer(efx, &channel->eventq); 1294 1295 /* Fill event queue with all ones (i.e. empty events) */ 1296 memset(channel->eventq.addr, 0xff, channel->eventq.len); 1297 1298 /* Push event queue to card */ 1299 EFX_POPULATE_OWORD_3(reg, 1300 FRF_AZ_EVQ_EN, 1, 1301 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), 1302 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); 1303 efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, 1304 channel->channel); 1305 1306 efx->type->push_irq_moderation(channel); 1307 } 1308 1309 void efx_nic_fini_eventq(struct efx_channel *channel) 1310 { 1311 efx_oword_t reg; 1312 struct efx_nic *efx = channel->efx; 1313 1314 /* Remove event queue from card */ 1315 EFX_ZERO_OWORD(reg); 1316 efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, 1317 channel->channel); 1318 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1319 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1320 1321 /* Unpin event queue */ 1322 efx_fini_special_buffer(efx, &channel->eventq); 1323 } 1324 1325 /* Free buffers backing event queue */ 1326 void efx_nic_remove_eventq(struct efx_channel *channel) 1327 { 1328 efx_free_special_buffer(channel->efx, &channel->eventq); 1329 } 1330 1331 1332 void efx_nic_event_test_start(struct efx_channel *channel) 1333 { 1334 channel->event_test_cpu = -1; 1335 smp_wmb(); 1336 efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); 1337 } 1338 1339 void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue) 1340 { 1341 efx_magic_event(efx_rx_queue_channel(rx_queue), 1342 EFX_CHANNEL_MAGIC_FILL(rx_queue)); 1343 } 1344 1345 /************************************************************************** 1346 * 1347 * Hardware interrupts 1348 * The hardware interrupt handler does very little work; all the event 1349 * queue processing is carried out by per-channel tasklets. 1350 * 1351 **************************************************************************/ 1352 1353 /* Enable/disable/generate interrupts */ 1354 static inline void efx_nic_interrupts(struct efx_nic *efx, 1355 bool enabled, bool force) 1356 { 1357 efx_oword_t int_en_reg_ker; 1358 1359 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1360 FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 1361 FRF_AZ_KER_INT_KER, force, 1362 FRF_AZ_DRV_INT_EN_KER, enabled); 1363 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1364 } 1365 1366 void efx_nic_enable_interrupts(struct efx_nic *efx) 1367 { 1368 EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1369 wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1370 1371 efx_nic_interrupts(efx, true, false); 1372 } 1373 1374 void efx_nic_disable_interrupts(struct efx_nic *efx) 1375 { 1376 /* Disable interrupts */ 1377 efx_nic_interrupts(efx, false, false); 1378 } 1379 1380 /* Generate a test interrupt 1381 * Interrupt must already have been enabled, otherwise nasty things 1382 * may happen. 1383 */ 1384 void efx_nic_irq_test_start(struct efx_nic *efx) 1385 { 1386 efx->last_irq_cpu = -1; 1387 smp_wmb(); 1388 efx_nic_interrupts(efx, true, true); 1389 } 1390 1391 /* Process a fatal interrupt 1392 * Disable bus mastering ASAP and schedule a reset 1393 */ 1394 irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) 1395 { 1396 struct falcon_nic_data *nic_data = efx->nic_data; 1397 efx_oword_t *int_ker = efx->irq_status.addr; 1398 efx_oword_t fatal_intr; 1399 int error, mem_perr; 1400 1401 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1402 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1403 1404 netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 1405 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1406 EFX_OWORD_VAL(fatal_intr), 1407 error ? "disabling bus mastering" : "no recognised error"); 1408 1409 /* If this is a memory parity error dump which blocks are offending */ 1410 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1411 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1412 if (mem_perr) { 1413 efx_oword_t reg; 1414 efx_reado(efx, ®, FR_AZ_MEM_STAT); 1415 netif_err(efx, hw, efx->net_dev, 1416 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1417 EFX_OWORD_VAL(reg)); 1418 } 1419 1420 /* Disable both devices */ 1421 pci_clear_master(efx->pci_dev); 1422 if (efx_nic_is_dual_func(efx)) 1423 pci_clear_master(nic_data->pci_dev2); 1424 efx_nic_disable_interrupts(efx); 1425 1426 /* Count errors and reset or disable the NIC accordingly */ 1427 if (efx->int_error_count == 0 || 1428 time_after(jiffies, efx->int_error_expire)) { 1429 efx->int_error_count = 0; 1430 efx->int_error_expire = 1431 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1432 } 1433 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1434 netif_err(efx, hw, efx->net_dev, 1435 "SYSTEM ERROR - reset scheduled\n"); 1436 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1437 } else { 1438 netif_err(efx, hw, efx->net_dev, 1439 "SYSTEM ERROR - max number of errors seen." 1440 "NIC will be disabled\n"); 1441 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1442 } 1443 1444 return IRQ_HANDLED; 1445 } 1446 1447 /* Handle a legacy interrupt 1448 * Acknowledges the interrupt and schedule event queue processing. 1449 */ 1450 static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) 1451 { 1452 struct efx_nic *efx = dev_id; 1453 efx_oword_t *int_ker = efx->irq_status.addr; 1454 irqreturn_t result = IRQ_NONE; 1455 struct efx_channel *channel; 1456 efx_dword_t reg; 1457 u32 queues; 1458 int syserr; 1459 1460 /* Could this be ours? If interrupts are disabled then the 1461 * channel state may not be valid. 1462 */ 1463 if (!efx->legacy_irq_enabled) 1464 return result; 1465 1466 /* Read the ISR which also ACKs the interrupts */ 1467 efx_readd(efx, ®, FR_BZ_INT_ISR0); 1468 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1469 1470 /* Handle non-event-queue sources */ 1471 if (queues & (1U << efx->irq_level)) { 1472 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1473 if (unlikely(syserr)) 1474 return efx_nic_fatal_interrupt(efx); 1475 efx->last_irq_cpu = raw_smp_processor_id(); 1476 } 1477 1478 if (queues != 0) { 1479 if (EFX_WORKAROUND_15783(efx)) 1480 efx->irq_zero_count = 0; 1481 1482 /* Schedule processing of any interrupting queues */ 1483 efx_for_each_channel(channel, efx) { 1484 if (queues & 1) 1485 efx_schedule_channel_irq(channel); 1486 queues >>= 1; 1487 } 1488 result = IRQ_HANDLED; 1489 1490 } else if (EFX_WORKAROUND_15783(efx)) { 1491 efx_qword_t *event; 1492 1493 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1494 * because this might be a shared interrupt. */ 1495 if (efx->irq_zero_count++ == 0) 1496 result = IRQ_HANDLED; 1497 1498 /* Ensure we schedule or rearm all event queues */ 1499 efx_for_each_channel(channel, efx) { 1500 event = efx_event(channel, channel->eventq_read_ptr); 1501 if (efx_event_present(event)) 1502 efx_schedule_channel_irq(channel); 1503 else 1504 efx_nic_eventq_read_ack(channel); 1505 } 1506 } 1507 1508 if (result == IRQ_HANDLED) 1509 netif_vdbg(efx, intr, efx->net_dev, 1510 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1511 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1512 1513 return result; 1514 } 1515 1516 /* Handle an MSI interrupt 1517 * 1518 * Handle an MSI hardware interrupt. This routine schedules event 1519 * queue processing. No interrupt acknowledgement cycle is necessary. 1520 * Also, we never need to check that the interrupt is for us, since 1521 * MSI interrupts cannot be shared. 1522 */ 1523 static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) 1524 { 1525 struct efx_channel *channel = *(struct efx_channel **)dev_id; 1526 struct efx_nic *efx = channel->efx; 1527 efx_oword_t *int_ker = efx->irq_status.addr; 1528 int syserr; 1529 1530 netif_vdbg(efx, intr, efx->net_dev, 1531 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1532 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1533 1534 /* Handle non-event-queue sources */ 1535 if (channel->channel == efx->irq_level) { 1536 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1537 if (unlikely(syserr)) 1538 return efx_nic_fatal_interrupt(efx); 1539 efx->last_irq_cpu = raw_smp_processor_id(); 1540 } 1541 1542 /* Schedule processing of the channel */ 1543 efx_schedule_channel_irq(channel); 1544 1545 return IRQ_HANDLED; 1546 } 1547 1548 1549 /* Setup RSS indirection table. 1550 * This maps from the hash value of the packet to RXQ 1551 */ 1552 void efx_nic_push_rx_indir_table(struct efx_nic *efx) 1553 { 1554 size_t i = 0; 1555 efx_dword_t dword; 1556 1557 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) 1558 return; 1559 1560 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 1561 FR_BZ_RX_INDIRECTION_TBL_ROWS); 1562 1563 for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1564 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1565 efx->rx_indir_table[i]); 1566 efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); 1567 } 1568 } 1569 1570 /* Hook interrupt handler(s) 1571 * Try MSI and then legacy interrupts. 1572 */ 1573 int efx_nic_init_interrupt(struct efx_nic *efx) 1574 { 1575 struct efx_channel *channel; 1576 int rc; 1577 1578 if (!EFX_INT_MODE_USE_MSI(efx)) { 1579 irq_handler_t handler; 1580 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1581 handler = efx_legacy_interrupt; 1582 else 1583 handler = falcon_legacy_interrupt_a1; 1584 1585 rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, 1586 efx->name, efx); 1587 if (rc) { 1588 netif_err(efx, drv, efx->net_dev, 1589 "failed to hook legacy IRQ %d\n", 1590 efx->pci_dev->irq); 1591 goto fail1; 1592 } 1593 return 0; 1594 } 1595 1596 /* Hook MSI or MSI-X interrupt */ 1597 efx_for_each_channel(channel, efx) { 1598 rc = request_irq(channel->irq, efx_msi_interrupt, 1599 IRQF_PROBE_SHARED, /* Not shared */ 1600 efx->channel_name[channel->channel], 1601 &efx->channel[channel->channel]); 1602 if (rc) { 1603 netif_err(efx, drv, efx->net_dev, 1604 "failed to hook IRQ %d\n", channel->irq); 1605 goto fail2; 1606 } 1607 } 1608 1609 return 0; 1610 1611 fail2: 1612 efx_for_each_channel(channel, efx) 1613 free_irq(channel->irq, &efx->channel[channel->channel]); 1614 fail1: 1615 return rc; 1616 } 1617 1618 void efx_nic_fini_interrupt(struct efx_nic *efx) 1619 { 1620 struct efx_channel *channel; 1621 efx_oword_t reg; 1622 1623 /* Disable MSI/MSI-X interrupts */ 1624 efx_for_each_channel(channel, efx) { 1625 if (channel->irq) 1626 free_irq(channel->irq, &efx->channel[channel->channel]); 1627 } 1628 1629 /* ACK legacy interrupt */ 1630 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1631 efx_reado(efx, ®, FR_BZ_INT_ISR0); 1632 else 1633 falcon_irq_ack_a1(efx); 1634 1635 /* Disable legacy interrupt */ 1636 if (efx->legacy_irq) 1637 free_irq(efx->legacy_irq, efx); 1638 } 1639 1640 /* Looks at available SRAM resources and works out how many queues we 1641 * can support, and where things like descriptor caches should live. 1642 * 1643 * SRAM is split up as follows: 1644 * 0 buftbl entries for channels 1645 * efx->vf_buftbl_base buftbl entries for SR-IOV 1646 * efx->rx_dc_base RX descriptor caches 1647 * efx->tx_dc_base TX descriptor caches 1648 */ 1649 void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw) 1650 { 1651 unsigned vi_count, buftbl_min; 1652 1653 /* Account for the buffer table entries backing the datapath channels 1654 * and the descriptor caches for those channels. 1655 */ 1656 buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE + 1657 efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE + 1658 efx->n_channels * EFX_MAX_EVQ_SIZE) 1659 * sizeof(efx_qword_t) / EFX_BUF_SIZE); 1660 vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES); 1661 1662 #ifdef CONFIG_SFC_SRIOV 1663 if (efx_sriov_wanted(efx)) { 1664 unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit; 1665 1666 efx->vf_buftbl_base = buftbl_min; 1667 1668 vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES; 1669 vi_count = max(vi_count, EFX_VI_BASE); 1670 buftbl_free = (sram_lim_qw - buftbl_min - 1671 vi_count * vi_dc_entries); 1672 1673 entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) * 1674 efx_vf_size(efx)); 1675 vf_limit = min(buftbl_free / entries_per_vf, 1676 (1024U - EFX_VI_BASE) >> efx->vi_scale); 1677 1678 if (efx->vf_count > vf_limit) { 1679 netif_err(efx, probe, efx->net_dev, 1680 "Reducing VF count from from %d to %d\n", 1681 efx->vf_count, vf_limit); 1682 efx->vf_count = vf_limit; 1683 } 1684 vi_count += efx->vf_count * efx_vf_size(efx); 1685 } 1686 #endif 1687 1688 efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES; 1689 efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES; 1690 } 1691 1692 u32 efx_nic_fpga_ver(struct efx_nic *efx) 1693 { 1694 efx_oword_t altera_build; 1695 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); 1696 return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); 1697 } 1698 1699 void efx_nic_init_common(struct efx_nic *efx) 1700 { 1701 efx_oword_t temp; 1702 1703 /* Set positions of descriptor caches in SRAM. */ 1704 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); 1705 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 1706 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); 1707 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1708 1709 /* Set TX descriptor cache size. */ 1710 BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); 1711 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 1712 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); 1713 1714 /* Set RX descriptor cache size. Set low watermark to size-8, as 1715 * this allows most efficient prefetching. 1716 */ 1717 BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); 1718 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 1719 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); 1720 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 1721 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); 1722 1723 /* Program INT_KER address */ 1724 EFX_POPULATE_OWORD_2(temp, 1725 FRF_AZ_NORM_INT_VEC_DIS_KER, 1726 EFX_INT_MODE_USE_MSI(efx), 1727 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1728 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1729 1730 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1731 /* Use an interrupt level unused by event queues */ 1732 efx->irq_level = 0x1f; 1733 else 1734 /* Use a valid MSI-X vector */ 1735 efx->irq_level = 0; 1736 1737 /* Enable all the genuinely fatal interrupts. (They are still 1738 * masked by the overall interrupt mask, controlled by 1739 * falcon_interrupts()). 1740 * 1741 * Note: All other fatal interrupts are enabled 1742 */ 1743 EFX_POPULATE_OWORD_3(temp, 1744 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1745 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1746 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1747 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1748 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); 1749 EFX_INVERT_OWORD(temp); 1750 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1751 1752 efx_nic_push_rx_indir_table(efx); 1753 1754 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1755 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 1756 */ 1757 efx_reado(efx, &temp, FR_AZ_TX_RESERVED); 1758 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1759 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1760 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1761 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); 1762 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1763 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1764 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1765 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1766 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1767 /* Disable hardware watchdog which can misfire */ 1768 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1769 /* Squash TX of packets of 16 bytes or less */ 1770 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1771 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1772 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1773 1774 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 1775 EFX_POPULATE_OWORD_4(temp, 1776 /* Default values */ 1777 FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, 1778 FRF_BZ_TX_PACE_SB_AF, 0xb, 1779 FRF_BZ_TX_PACE_FB_BASE, 0, 1780 /* Allow large pace values in the 1781 * fast bin. */ 1782 FRF_BZ_TX_PACE_BIN_TH, 1783 FFE_BZ_TX_PACE_RESERVED); 1784 efx_writeo(efx, &temp, FR_BZ_TX_PACE); 1785 } 1786 } 1787 1788 /* Register dump */ 1789 1790 #define REGISTER_REVISION_A 1 1791 #define REGISTER_REVISION_B 2 1792 #define REGISTER_REVISION_C 3 1793 #define REGISTER_REVISION_Z 3 /* latest revision */ 1794 1795 struct efx_nic_reg { 1796 u32 offset:24; 1797 u32 min_revision:2, max_revision:2; 1798 }; 1799 1800 #define REGISTER(name, min_rev, max_rev) { \ 1801 FR_ ## min_rev ## max_rev ## _ ## name, \ 1802 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ 1803 } 1804 #define REGISTER_AA(name) REGISTER(name, A, A) 1805 #define REGISTER_AB(name) REGISTER(name, A, B) 1806 #define REGISTER_AZ(name) REGISTER(name, A, Z) 1807 #define REGISTER_BB(name) REGISTER(name, B, B) 1808 #define REGISTER_BZ(name) REGISTER(name, B, Z) 1809 #define REGISTER_CZ(name) REGISTER(name, C, Z) 1810 1811 static const struct efx_nic_reg efx_nic_regs[] = { 1812 REGISTER_AZ(ADR_REGION), 1813 REGISTER_AZ(INT_EN_KER), 1814 REGISTER_BZ(INT_EN_CHAR), 1815 REGISTER_AZ(INT_ADR_KER), 1816 REGISTER_BZ(INT_ADR_CHAR), 1817 /* INT_ACK_KER is WO */ 1818 /* INT_ISR0 is RC */ 1819 REGISTER_AZ(HW_INIT), 1820 REGISTER_CZ(USR_EV_CFG), 1821 REGISTER_AB(EE_SPI_HCMD), 1822 REGISTER_AB(EE_SPI_HADR), 1823 REGISTER_AB(EE_SPI_HDATA), 1824 REGISTER_AB(EE_BASE_PAGE), 1825 REGISTER_AB(EE_VPD_CFG0), 1826 /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ 1827 /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ 1828 /* PCIE_CORE_INDIRECT is indirect */ 1829 REGISTER_AB(NIC_STAT), 1830 REGISTER_AB(GPIO_CTL), 1831 REGISTER_AB(GLB_CTL), 1832 /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ 1833 REGISTER_BZ(DP_CTRL), 1834 REGISTER_AZ(MEM_STAT), 1835 REGISTER_AZ(CS_DEBUG), 1836 REGISTER_AZ(ALTERA_BUILD), 1837 REGISTER_AZ(CSR_SPARE), 1838 REGISTER_AB(PCIE_SD_CTL0123), 1839 REGISTER_AB(PCIE_SD_CTL45), 1840 REGISTER_AB(PCIE_PCS_CTL_STAT), 1841 /* DEBUG_DATA_OUT is not used */ 1842 /* DRV_EV is WO */ 1843 REGISTER_AZ(EVQ_CTL), 1844 REGISTER_AZ(EVQ_CNT1), 1845 REGISTER_AZ(EVQ_CNT2), 1846 REGISTER_AZ(BUF_TBL_CFG), 1847 REGISTER_AZ(SRM_RX_DC_CFG), 1848 REGISTER_AZ(SRM_TX_DC_CFG), 1849 REGISTER_AZ(SRM_CFG), 1850 /* BUF_TBL_UPD is WO */ 1851 REGISTER_AZ(SRM_UPD_EVQ), 1852 REGISTER_AZ(SRAM_PARITY), 1853 REGISTER_AZ(RX_CFG), 1854 REGISTER_BZ(RX_FILTER_CTL), 1855 /* RX_FLUSH_DESCQ is WO */ 1856 REGISTER_AZ(RX_DC_CFG), 1857 REGISTER_AZ(RX_DC_PF_WM), 1858 REGISTER_BZ(RX_RSS_TKEY), 1859 /* RX_NODESC_DROP is RC */ 1860 REGISTER_AA(RX_SELF_RST), 1861 /* RX_DEBUG, RX_PUSH_DROP are not used */ 1862 REGISTER_CZ(RX_RSS_IPV6_REG1), 1863 REGISTER_CZ(RX_RSS_IPV6_REG2), 1864 REGISTER_CZ(RX_RSS_IPV6_REG3), 1865 /* TX_FLUSH_DESCQ is WO */ 1866 REGISTER_AZ(TX_DC_CFG), 1867 REGISTER_AA(TX_CHKSM_CFG), 1868 REGISTER_AZ(TX_CFG), 1869 /* TX_PUSH_DROP is not used */ 1870 REGISTER_AZ(TX_RESERVED), 1871 REGISTER_BZ(TX_PACE), 1872 /* TX_PACE_DROP_QID is RC */ 1873 REGISTER_BB(TX_VLAN), 1874 REGISTER_BZ(TX_IPFIL_PORTEN), 1875 REGISTER_AB(MD_TXD), 1876 REGISTER_AB(MD_RXD), 1877 REGISTER_AB(MD_CS), 1878 REGISTER_AB(MD_PHY_ADR), 1879 REGISTER_AB(MD_ID), 1880 /* MD_STAT is RC */ 1881 REGISTER_AB(MAC_STAT_DMA), 1882 REGISTER_AB(MAC_CTRL), 1883 REGISTER_BB(GEN_MODE), 1884 REGISTER_AB(MAC_MC_HASH_REG0), 1885 REGISTER_AB(MAC_MC_HASH_REG1), 1886 REGISTER_AB(GM_CFG1), 1887 REGISTER_AB(GM_CFG2), 1888 /* GM_IPG and GM_HD are not used */ 1889 REGISTER_AB(GM_MAX_FLEN), 1890 /* GM_TEST is not used */ 1891 REGISTER_AB(GM_ADR1), 1892 REGISTER_AB(GM_ADR2), 1893 REGISTER_AB(GMF_CFG0), 1894 REGISTER_AB(GMF_CFG1), 1895 REGISTER_AB(GMF_CFG2), 1896 REGISTER_AB(GMF_CFG3), 1897 REGISTER_AB(GMF_CFG4), 1898 REGISTER_AB(GMF_CFG5), 1899 REGISTER_BB(TX_SRC_MAC_CTL), 1900 REGISTER_AB(XM_ADR_LO), 1901 REGISTER_AB(XM_ADR_HI), 1902 REGISTER_AB(XM_GLB_CFG), 1903 REGISTER_AB(XM_TX_CFG), 1904 REGISTER_AB(XM_RX_CFG), 1905 REGISTER_AB(XM_MGT_INT_MASK), 1906 REGISTER_AB(XM_FC), 1907 REGISTER_AB(XM_PAUSE_TIME), 1908 REGISTER_AB(XM_TX_PARAM), 1909 REGISTER_AB(XM_RX_PARAM), 1910 /* XM_MGT_INT_MSK (note no 'A') is RC */ 1911 REGISTER_AB(XX_PWR_RST), 1912 REGISTER_AB(XX_SD_CTL), 1913 REGISTER_AB(XX_TXDRV_CTL), 1914 /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ 1915 /* XX_CORE_STAT is partly RC */ 1916 }; 1917 1918 struct efx_nic_reg_table { 1919 u32 offset:24; 1920 u32 min_revision:2, max_revision:2; 1921 u32 step:6, rows:21; 1922 }; 1923 1924 #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ 1925 offset, \ 1926 REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ 1927 step, rows \ 1928 } 1929 #define REGISTER_TABLE(name, min_rev, max_rev) \ 1930 REGISTER_TABLE_DIMENSIONS( \ 1931 name, FR_ ## min_rev ## max_rev ## _ ## name, \ 1932 min_rev, max_rev, \ 1933 FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ 1934 FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) 1935 #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) 1936 #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) 1937 #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) 1938 #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) 1939 #define REGISTER_TABLE_BB_CZ(name) \ 1940 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ 1941 FR_BZ_ ## name ## _STEP, \ 1942 FR_BB_ ## name ## _ROWS), \ 1943 REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ 1944 FR_BZ_ ## name ## _STEP, \ 1945 FR_CZ_ ## name ## _ROWS) 1946 #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) 1947 1948 static const struct efx_nic_reg_table efx_nic_reg_tables[] = { 1949 /* DRIVER is not used */ 1950 /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ 1951 REGISTER_TABLE_BB(TX_IPFIL_TBL), 1952 REGISTER_TABLE_BB(TX_SRC_MAC_TBL), 1953 REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), 1954 REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), 1955 REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), 1956 REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), 1957 REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), 1958 REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), 1959 /* We can't reasonably read all of the buffer table (up to 8MB!). 1960 * However this driver will only use a few entries. Reading 1961 * 1K entries allows for some expansion of queue count and 1962 * size before we need to change the version. */ 1963 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, 1964 A, A, 8, 1024), 1965 REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, 1966 B, Z, 8, 1024), 1967 REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), 1968 REGISTER_TABLE_BB_CZ(TIMER_TBL), 1969 REGISTER_TABLE_BB_CZ(TX_PACE_TBL), 1970 REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), 1971 /* TX_FILTER_TBL0 is huge and not used by this driver */ 1972 REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), 1973 REGISTER_TABLE_CZ(MC_TREG_SMEM), 1974 /* MSIX_PBA_TABLE is not mapped */ 1975 /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ 1976 REGISTER_TABLE_BZ(RX_FILTER_TBL0), 1977 }; 1978 1979 size_t efx_nic_get_regs_len(struct efx_nic *efx) 1980 { 1981 const struct efx_nic_reg *reg; 1982 const struct efx_nic_reg_table *table; 1983 size_t len = 0; 1984 1985 for (reg = efx_nic_regs; 1986 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); 1987 reg++) 1988 if (efx->type->revision >= reg->min_revision && 1989 efx->type->revision <= reg->max_revision) 1990 len += sizeof(efx_oword_t); 1991 1992 for (table = efx_nic_reg_tables; 1993 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); 1994 table++) 1995 if (efx->type->revision >= table->min_revision && 1996 efx->type->revision <= table->max_revision) 1997 len += table->rows * min_t(size_t, table->step, 16); 1998 1999 return len; 2000 } 2001 2002 void efx_nic_get_regs(struct efx_nic *efx, void *buf) 2003 { 2004 const struct efx_nic_reg *reg; 2005 const struct efx_nic_reg_table *table; 2006 2007 for (reg = efx_nic_regs; 2008 reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); 2009 reg++) { 2010 if (efx->type->revision >= reg->min_revision && 2011 efx->type->revision <= reg->max_revision) { 2012 efx_reado(efx, (efx_oword_t *)buf, reg->offset); 2013 buf += sizeof(efx_oword_t); 2014 } 2015 } 2016 2017 for (table = efx_nic_reg_tables; 2018 table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); 2019 table++) { 2020 size_t size, i; 2021 2022 if (!(efx->type->revision >= table->min_revision && 2023 efx->type->revision <= table->max_revision)) 2024 continue; 2025 2026 size = min_t(size_t, table->step, 16); 2027 2028 for (i = 0; i < table->rows; i++) { 2029 switch (table->step) { 2030 case 4: /* 32-bit register or SRAM */ 2031 efx_readd_table(efx, buf, table->offset, i); 2032 break; 2033 case 8: /* 64-bit SRAM */ 2034 efx_sram_readq(efx, 2035 efx->membase + table->offset, 2036 buf, i); 2037 break; 2038 case 16: /* 128-bit register */ 2039 efx_reado_table(efx, buf, table->offset, i); 2040 break; 2041 case 32: /* 128-bit register, interleaved */ 2042 efx_reado_table(efx, buf, table->offset, 2 * i); 2043 break; 2044 default: 2045 WARN_ON(1); 2046 return; 2047 } 2048 buf += size; 2049 } 2050 } 2051 } 2052