/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
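
/* For illustration: a test event generated on channel 3 carries the magic
 * value _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, 3) == 0x010103, and
 * _EFX_CHANNEL_MAGIC_CODE(0x010103) == 0x000101 recovers the event code.
 */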

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
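
/* Note for orientation: each buffer table entry maps one 4 KB page, so a
 * 16 KB descriptor ring allocated by efx_alloc_special_buffer() below
 * occupies four consecutive entries starting at buffer->index.
 */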

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}
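
/* Typical lifecycle (summary of the helpers above): a queue calls
 * efx_alloc_special_buffer() at probe time, efx_init_special_buffer() when
 * the queue is initialised, and efx_fini_special_buffer()/
 * efx_free_special_buffer() on the reverse paths.
 */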

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_ATOMIC);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
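
/* Explanatory note for the helper below: empty_read_count records the count
 * at which the TX queue was last seen empty, tagged with
 * EFX_EMPTY_COUNT_VALID; the XOR test therefore succeeds only if nothing has
 * been written since the queue drained, which is when a descriptor push is
 * worthwhile.
 */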

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
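
/* Bookkeeping overview (summary of the code below): drain_pending counts
 * queues still awaiting a drain event; rxq_flush_pending counts RX queues
 * waiting to have a flush issued; rxq_flush_outstanding counts flushes
 * issued to the hardware but not yet completed, and is capped at
 * EFX_RX_FLUSH_COUNT.
 */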

/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	efx->type->finish_flush(efx);

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			 rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
			EFX_RX_PKT_CSUMMED : 0;
	} else {
		flags = efx_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);

		efx_magic_event(tx_queue->channel,
				EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_magic_event(efx_rx_queue_channel(rx_queue),
				EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->drain_pending) == 0);
	atomic_dec(&efx->drain_pending);
	if (efx_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(rx_queue);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		rx_queue->enabled = false;
		efx_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_tx_flush_done(efx, event);
		efx_sriov_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_handle_rx_flush_done(efx, event);
		efx_sriov_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
				  " TX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		} else
			efx_sriov_desc_fetch_err(efx, ev_sub_data);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_USER_EV:
			efx_sriov_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();
	efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
	efx_magic_event(efx_rx_queue_channel(rx_queue),
			EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_nic_interrupts(efx, true, false);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel_irq(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Handle non-event-queue sources */
	if (channel->channel == efx->irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	efx_schedule_channel_irq(channel);

	return IRQ_HANDLED;
}

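
/* Usage note (assumption based on the Falcon B0 register layout): the
 * indirection table written below has FR_BZ_RX_INDIRECTION_TBL_ROWS
 * entries, and the low-order bits of a packet's RSS hash select a row,
 * whose FRF_BZ_IT_QUEUE field names the destination RX queue.
 */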

/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
	size_t i = 0;
	efx_dword_t dword;

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
		return;

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		efx_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}

/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 */
int efx_nic_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		irq_handler_t handler;
		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
			handler = efx_legacy_interrupt;
		else
			handler = falcon_legacy_interrupt_a1;

		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		return 0;
	}

	/* Hook MSI or MSI-X interrupt */
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx_msi_interrupt,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->channel_name[channel->channel],
				 &efx->channel[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
	}

	return 0;

 fail2:
	efx_for_each_channel(channel, efx)
		free_irq(channel->irq, &efx->channel[channel->channel]);
 fail1:
	return rc;
}

void efx_nic_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	efx_oword_t reg;

	/* Disable MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			free_irq(channel->irq, &efx->channel[channel->channel]);
	}

	/* ACK legacy interrupt */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
	else
		falcon_irq_ack_a1(efx);

	/* Disable legacy interrupt */
	if (efx->legacy_irq)
		free_irq(efx->legacy_irq, efx);
}

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
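
/* Worked example using the constants near the top of this file: each VI
 * needs TX_DC_ENTRIES + RX_DC_ENTRIES = 16 + 64 = 80 descriptor-cache
 * entries, carved from the top of SRAM downwards (TX caches above RX
 * caches), with the buffer table growing upwards from zero.
 */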

void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx_sriov_wanted(efx)) {
		unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;

		efx->vf_buftbl_base = buftbl_min;

		vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
		vi_count = max(vi_count, EFX_VI_BASE);
		buftbl_free = (sram_lim_qw - buftbl_min -
			       vi_count * vi_dc_entries);

		entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
				  efx_vf_size(efx));
		vf_limit = min(buftbl_free / entries_per_vf,
			       (1024U - EFX_VI_BASE) >> efx->vi_scale);

		if (efx->vf_count > vf_limit) {
			netif_err(efx, probe, efx->net_dev,
				  "Reducing VF count from %d to %d\n",
				  efx->vf_count, vf_limit);
			efx->vf_count = vf_limit;
		}
		vi_count += efx->vf_count * efx_vf_size(efx);
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).

u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_nic_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	efx_nic_push_rx_indir_table(efx);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

/* Register dump */

#define REGISTER_REVISION_A	1
#define REGISTER_REVISION_B	2
#define REGISTER_REVISION_C	3
#define REGISTER_REVISION_Z	3	/* latest revision */

struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
};

#define REGISTER(name, min_rev, max_rev) {				\
	FR_ ## min_rev ## max_rev ## _ ## name,				\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
}
#define REGISTER_AA(name) REGISTER(name, A, A)
#define REGISTER_AB(name) REGISTER(name, A, B)
#define REGISTER_AZ(name) REGISTER(name, A, Z)
#define REGISTER_BB(name) REGISTER(name, B, B)
#define REGISTER_BZ(name) REGISTER(name, B, Z)
#define REGISTER_CZ(name) REGISTER(name, C, Z)
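
/* For illustration only: with the definitions above, an entry such as
 * REGISTER_BZ(TX_PACE) expands to the initialiser below, i.e. the
 * register's B-revision offset plus the supported revision range packed
 * into a struct efx_nic_reg:
 *
 *	{ FR_BZ_TX_PACE, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 */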

static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};

struct efx_nic_reg_table {
	u32 offset:24;
	u32 min_revision:2, max_revision:2;
	u32 step:6, rows:21;
};

#define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
	offset,								\
	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
	step, rows							\
}
#define REGISTER_TABLE(name, min_rev, max_rev)				\
	REGISTER_TABLE_DIMENSIONS(					\
		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
		min_rev, max_rev,					\
		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
#define REGISTER_TABLE_BB_CZ(name)					\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_BB_ ## name ## _ROWS),		\
	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
				  FR_BZ_ ## name ## _STEP,		\
				  FR_CZ_ ## name ## _ROWS)
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
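
/* For illustration only: REGISTER_TABLE_BB_CZ(TX_PACE_TBL) expands to two
 * consecutive array entries, because the B-revision and C-revision tables
 * share the FR_BZ_ offset and step but differ in row count:
 *
 *	{ FR_BZ_TX_PACE_TBL, REGISTER_REVISION_B, REGISTER_REVISION_B,
 *	  FR_BZ_TX_PACE_TBL_STEP, FR_BB_TX_PACE_TBL_ROWS },
 *	{ FR_BZ_TX_PACE_TBL, REGISTER_REVISION_C, REGISTER_REVISION_Z,
 *	  FR_BZ_TX_PACE_TBL_STEP, FR_CZ_TX_PACE_TBL_ROWS },
 */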

static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries.  Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};

size_t efx_nic_get_regs_len(struct efx_nic *efx)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;
	size_t len = 0;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++)
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision)
			len += sizeof(efx_oword_t);

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++)
		if (efx->type->revision >= table->min_revision &&
		    efx->type->revision <= table->max_revision)
			len += table->rows * min_t(size_t, table->step, 16);

	return len;
}

void efx_nic_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
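
/* For illustration only: these two routines are the natural backend for the
 * ethtool register-dump hooks.  A minimal sketch of the glue, with
 * hypothetical wrapper names (the real ethtool code lives in a separate
 * file), might look like:
 *
 *	static int example_get_regs_len(struct net_device *net_dev)
 *	{
 *		struct efx_nic *efx = netdev_priv(net_dev);
 *
 *		return efx_nic_get_regs_len(efx);
 *	}
 *
 *	static void example_get_regs(struct net_device *net_dev,
 *				     struct ethtool_regs *regs, void *buf)
 *	{
 *		struct efx_nic *efx = netdev_priv(net_dev);
 *
 *		regs->version = efx->type->revision;
 *		efx_nic_get_regs(efx, buf);
 *	}
 */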