/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
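/* The _ORDER values presumably encode the cache size as 8 << order
 * (8 << 1 == 16, 8 << 3 == 64), matching the entry counts above.
 */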
/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)

static void efx_magic_event(struct efx_channel *channel, u32 magic);
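/* For illustration: a test event on a channel with ->channel == 2 has
 * magic (0x000101 << 8) | 2 == 0x00010102, and
 * _EFX_CHANNEL_MAGIC_CODE(0x00010102) recovers 0x000101.  The low byte
 * carries the queue or channel number; the upper bytes identify the
 * event type.
 */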
/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
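/* Each buffer table entry maps one EFX_BUF_SIZE (4 KB) page; the
 * FRF_AZ_BUF_ADR_FBUF field written below is therefore the DMA address
 * shifted right by 12 bits, i.e. the 4 KB page number.
 */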
/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_sriov_enabled(efx) &&
	       efx->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}
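/* Sizing example: a 512-entry descriptor ring needs
 * 512 * sizeof(efx_qword_t) == 4096 bytes, which rounds to exactly one
 * EFX_BUF_SIZE page and so consumes a single buffer table entry.
 */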
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status, MAC stats, etc.
 *
 **************************************************************************/
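/* Unlike special buffers, generic buffers are not entered in the buffer
 * table here; the hardware is presumably given their raw DMA address,
 * so no buffer ID range needs to be reserved for them.
 */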
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
				  buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
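/* efx_push_tx_desc() embeds the first descriptor in the doorbell write
 * itself (TX_DESC_PUSH_CMD), apparently to save the NIC a separate
 * descriptor fetch for a single descriptor written to an otherwise
 * empty queue; efx_may_push_tx_desc() below decides when that is safe.
 */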
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
		&& tx_queue->write_count - write_count == 1;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
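/* efx_nic_notify_rx_desc() below builds the descriptors lazily, for
 * every buffer added since the last call, so a single wmb() orders the
 * whole batch against the subsequent write-pointer update.
 */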
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;
	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
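/* Three counters drive the flush state machine below: drain_pending
 * counts queues whose drain events have not yet been seen,
 * rxq_flush_pending counts RX queues still waiting to be flushed, and
 * rxq_flush_outstanding counts RX flushes issued to the hardware but
 * not yet completed (at most EFX_RX_FLUSH_COUNT at a time).
 */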
/* efx_nic_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_nic_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->drain_pending) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment drain_pending as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_magic_event(channel,
						EFX_CHANNEL_MAGIC_TX_DRAIN(
							tx_queue));
			}
		}
	}

	return i;
}
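/* The atomic_cmpxchg() above claims each TX flush completion exactly
 * once, so the TX_DRAIN magic event is generated either here or in
 * efx_handle_tx_flush_done(), never by both.
 */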
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed.  Wait for the DRAIN events to be received so that
 * there are no more RX and TX events left on any channel.
 */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx->type->prepare_flush(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			efx_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			atomic_inc(&efx->drain_pending);
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->drain_pending) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion).  If that fails, fall back to the old scheme.
		 */
		if (efx_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_flush_rx_queue(rx_queue);
				}
			}
		}

wait:
		timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->drain_pending) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->drain_pending, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	efx->type->finish_flush(efx);

	return rc;
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
			efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}
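/* The tx_packets calculation above works modulo the ring size: for
 * example, with ptr_mask == 511, read_count == 510 and a completion
 * event for descriptor 3, (3 - 510) & 511 == 5 completions are counted.
 */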
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order.  Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}
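/* In the partial-discard case above, the bad index pointing at the last
 * descriptor of the current scatter burst indicates the NIC ran out of
 * descriptors mid-packet, so the packet can be counted as truncated
 * (n_rx_nodesc_trunc) and delivery continues.  Any other out-of-order
 * index forces a reset.
 */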
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
1082874aeea5SJeff Kirsher if (likely(rx_ev_pkt_ok)) {
1083874aeea5SJeff Kirsher /* If packet is marked as OK and packet type is TCP/IP or
1084874aeea5SJeff Kirsher * UDP/IP, then we can rely on the hardware checksum.
1085874aeea5SJeff Kirsher */
1086db339569SBen Hutchings flags = (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
1087db339569SBen Hutchings rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP) ?
1088db339569SBen Hutchings EFX_RX_PKT_CSUMMED : 0;
1089874aeea5SJeff Kirsher } else {
1090db339569SBen Hutchings flags = efx_handle_rx_not_ok(rx_queue, event);
1091874aeea5SJeff Kirsher }
1092874aeea5SJeff Kirsher
1093874aeea5SJeff Kirsher /* Detect multicast packets that didn't match the filter */
1094874aeea5SJeff Kirsher rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
1095874aeea5SJeff Kirsher if (rx_ev_mcast_pkt) {
1096874aeea5SJeff Kirsher unsigned int rx_ev_mcast_hash_match =
1097874aeea5SJeff Kirsher EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
1098874aeea5SJeff Kirsher
1099874aeea5SJeff Kirsher if (unlikely(!rx_ev_mcast_hash_match)) {
1100874aeea5SJeff Kirsher ++channel->n_rx_mcast_mismatch;
1101db339569SBen Hutchings flags |= EFX_RX_PKT_DISCARD;
1102874aeea5SJeff Kirsher }
1103874aeea5SJeff Kirsher }
1104874aeea5SJeff Kirsher
1105874aeea5SJeff Kirsher channel->irq_mod_score += 2;
1106874aeea5SJeff Kirsher
1107874aeea5SJeff Kirsher /* Handle received packet */
110885740cdfSBen Hutchings efx_rx_packet(rx_queue,
110985740cdfSBen Hutchings rx_queue->removed_count & rx_queue->ptr_mask,
111085740cdfSBen Hutchings rx_queue->scatter_n, rx_ev_byte_cnt, flags);
111185740cdfSBen Hutchings rx_queue->removed_count += rx_queue->scatter_n;
111285740cdfSBen Hutchings rx_queue->scatter_n = 0;
1113874aeea5SJeff Kirsher }
1114874aeea5SJeff Kirsher
11159f2cb71cSBen Hutchings /* If this flush done event corresponds to a &struct efx_tx_queue, then
11169f2cb71cSBen Hutchings * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
11179f2cb71cSBen Hutchings * of all transmit completions.
11189f2cb71cSBen Hutchings */
11199f2cb71cSBen Hutchings static void
11209f2cb71cSBen Hutchings efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
11219f2cb71cSBen Hutchings {
11229f2cb71cSBen Hutchings struct efx_tx_queue *tx_queue;
11239f2cb71cSBen Hutchings int qid;
11249f2cb71cSBen Hutchings
11259f2cb71cSBen Hutchings qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
11269f2cb71cSBen Hutchings if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
11279f2cb71cSBen Hutchings tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
11289f2cb71cSBen Hutchings qid % EFX_TXQ_TYPES);
1129525d9e82SDaniel Pieczko if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
11309f2cb71cSBen Hutchings efx_magic_event(tx_queue->channel,
11319f2cb71cSBen Hutchings EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
11329f2cb71cSBen Hutchings }
11339f2cb71cSBen Hutchings }
1134525d9e82SDaniel Pieczko }
11359f2cb71cSBen Hutchings
11369f2cb71cSBen Hutchings /* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
11379f2cb71cSBen Hutchings * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
11389f2cb71cSBen Hutchings * the RX queue back to the mask of RX queues in need of flushing.
11399f2cb71cSBen Hutchings */ 11409f2cb71cSBen Hutchings static void 11419f2cb71cSBen Hutchings efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event) 11429f2cb71cSBen Hutchings { 11439f2cb71cSBen Hutchings struct efx_channel *channel; 11449f2cb71cSBen Hutchings struct efx_rx_queue *rx_queue; 11459f2cb71cSBen Hutchings int qid; 11469f2cb71cSBen Hutchings bool failed; 11479f2cb71cSBen Hutchings 11489f2cb71cSBen Hutchings qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 11499f2cb71cSBen Hutchings failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 11509f2cb71cSBen Hutchings if (qid >= efx->n_channels) 11519f2cb71cSBen Hutchings return; 11529f2cb71cSBen Hutchings channel = efx_get_channel(efx, qid); 11539f2cb71cSBen Hutchings if (!efx_channel_has_rx_queue(channel)) 11549f2cb71cSBen Hutchings return; 11559f2cb71cSBen Hutchings rx_queue = efx_channel_get_rx_queue(channel); 11569f2cb71cSBen Hutchings 11579f2cb71cSBen Hutchings if (failed) { 11589f2cb71cSBen Hutchings netif_info(efx, hw, efx->net_dev, 11599f2cb71cSBen Hutchings "RXQ %d flush retry\n", qid); 11609f2cb71cSBen Hutchings rx_queue->flush_pending = true; 11619f2cb71cSBen Hutchings atomic_inc(&efx->rxq_flush_pending); 11629f2cb71cSBen Hutchings } else { 11639f2cb71cSBen Hutchings efx_magic_event(efx_rx_queue_channel(rx_queue), 11649f2cb71cSBen Hutchings EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)); 11659f2cb71cSBen Hutchings } 11669f2cb71cSBen Hutchings atomic_dec(&efx->rxq_flush_outstanding); 11679f2cb71cSBen Hutchings if (efx_flush_wake(efx)) 11689f2cb71cSBen Hutchings wake_up(&efx->flush_wq); 11699f2cb71cSBen Hutchings } 11709f2cb71cSBen Hutchings 11719f2cb71cSBen Hutchings static void 11729f2cb71cSBen Hutchings efx_handle_drain_event(struct efx_channel *channel) 11739f2cb71cSBen Hutchings { 11749f2cb71cSBen Hutchings struct efx_nic *efx = channel->efx; 11759f2cb71cSBen Hutchings 11769f2cb71cSBen Hutchings WARN_ON(atomic_read(&efx->drain_pending) == 0); 11779f2cb71cSBen Hutchings atomic_dec(&efx->drain_pending); 11789f2cb71cSBen Hutchings if (efx_flush_wake(efx)) 11799f2cb71cSBen Hutchings wake_up(&efx->flush_wq); 11809f2cb71cSBen Hutchings } 11819f2cb71cSBen Hutchings 1182874aeea5SJeff Kirsher static void 1183874aeea5SJeff Kirsher efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) 1184874aeea5SJeff Kirsher { 1185874aeea5SJeff Kirsher struct efx_nic *efx = channel->efx; 11862ae75dacSBen Hutchings struct efx_rx_queue *rx_queue = 11872ae75dacSBen Hutchings efx_channel_has_rx_queue(channel) ? 11882ae75dacSBen Hutchings efx_channel_get_rx_queue(channel) : NULL; 11899f2cb71cSBen Hutchings unsigned magic, code; 1190874aeea5SJeff Kirsher 11914ef594ebSBen Hutchings magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); 11929f2cb71cSBen Hutchings code = _EFX_CHANNEL_MAGIC_CODE(magic); 11934ef594ebSBen Hutchings 11949f2cb71cSBen Hutchings if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) { 1195dd40781eSBen Hutchings channel->event_test_cpu = raw_smp_processor_id(); 11969f2cb71cSBen Hutchings } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) { 1197874aeea5SJeff Kirsher /* The queue must be empty, so we won't receive any rx 1198874aeea5SJeff Kirsher * events, so efx_process_channel() won't refill the 1199874aeea5SJeff Kirsher * queue. 
Refill it here */ 12002ae75dacSBen Hutchings efx_fast_push_rx_descriptors(rx_queue); 12019f2cb71cSBen Hutchings } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) { 12029f2cb71cSBen Hutchings rx_queue->enabled = false; 12039f2cb71cSBen Hutchings efx_handle_drain_event(channel); 12049f2cb71cSBen Hutchings } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) { 12059f2cb71cSBen Hutchings efx_handle_drain_event(channel); 12069f2cb71cSBen Hutchings } else { 1207874aeea5SJeff Kirsher netif_dbg(efx, hw, efx->net_dev, "channel %d received " 1208874aeea5SJeff Kirsher "generated event "EFX_QWORD_FMT"\n", 1209874aeea5SJeff Kirsher channel->channel, EFX_QWORD_VAL(*event)); 1210874aeea5SJeff Kirsher } 12119f2cb71cSBen Hutchings } 1212874aeea5SJeff Kirsher 1213874aeea5SJeff Kirsher static void 1214874aeea5SJeff Kirsher efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 1215874aeea5SJeff Kirsher { 1216874aeea5SJeff Kirsher struct efx_nic *efx = channel->efx; 1217874aeea5SJeff Kirsher unsigned int ev_sub_code; 1218874aeea5SJeff Kirsher unsigned int ev_sub_data; 1219874aeea5SJeff Kirsher 1220874aeea5SJeff Kirsher ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); 1221874aeea5SJeff Kirsher ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); 1222874aeea5SJeff Kirsher 1223874aeea5SJeff Kirsher switch (ev_sub_code) { 1224874aeea5SJeff Kirsher case FSE_AZ_TX_DESCQ_FLS_DONE_EV: 1225874aeea5SJeff Kirsher netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", 1226874aeea5SJeff Kirsher channel->channel, ev_sub_data); 12279f2cb71cSBen Hutchings efx_handle_tx_flush_done(efx, event); 1228cd2d5b52SBen Hutchings efx_sriov_tx_flush_done(efx, event); 1229874aeea5SJeff Kirsher break; 1230874aeea5SJeff Kirsher case FSE_AZ_RX_DESCQ_FLS_DONE_EV: 1231874aeea5SJeff Kirsher netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", 1232874aeea5SJeff Kirsher channel->channel, ev_sub_data); 12339f2cb71cSBen Hutchings efx_handle_rx_flush_done(efx, event); 1234cd2d5b52SBen Hutchings efx_sriov_rx_flush_done(efx, event); 1235874aeea5SJeff Kirsher break; 1236874aeea5SJeff Kirsher case FSE_AZ_EVQ_INIT_DONE_EV: 1237874aeea5SJeff Kirsher netif_dbg(efx, hw, efx->net_dev, 1238874aeea5SJeff Kirsher "channel %d EVQ %d initialised\n", 1239874aeea5SJeff Kirsher channel->channel, ev_sub_data); 1240874aeea5SJeff Kirsher break; 1241874aeea5SJeff Kirsher case FSE_AZ_SRM_UPD_DONE_EV: 1242874aeea5SJeff Kirsher netif_vdbg(efx, hw, efx->net_dev, 1243874aeea5SJeff Kirsher "channel %d SRAM update done\n", channel->channel); 1244874aeea5SJeff Kirsher break; 1245874aeea5SJeff Kirsher case FSE_AZ_WAKE_UP_EV: 1246874aeea5SJeff Kirsher netif_vdbg(efx, hw, efx->net_dev, 1247874aeea5SJeff Kirsher "channel %d RXQ %d wakeup event\n", 1248874aeea5SJeff Kirsher channel->channel, ev_sub_data); 1249874aeea5SJeff Kirsher break; 1250874aeea5SJeff Kirsher case FSE_AZ_TIMER_EV: 1251874aeea5SJeff Kirsher netif_vdbg(efx, hw, efx->net_dev, 1252874aeea5SJeff Kirsher "channel %d RX queue %d timer expired\n", 1253874aeea5SJeff Kirsher channel->channel, ev_sub_data); 1254874aeea5SJeff Kirsher break; 1255874aeea5SJeff Kirsher case FSE_AA_RX_RECOVER_EV: 1256874aeea5SJeff Kirsher netif_err(efx, rx_err, efx->net_dev, 1257874aeea5SJeff Kirsher "channel %d seen DRIVER RX_RESET event. " 1258874aeea5SJeff Kirsher "Resetting.\n", channel->channel); 1259874aeea5SJeff Kirsher atomic_inc(&efx->rx_reset); 1260874aeea5SJeff Kirsher efx_schedule_reset(efx, 1261874aeea5SJeff Kirsher EFX_WORKAROUND_6555(efx) ? 
1262874aeea5SJeff Kirsher RESET_TYPE_RX_RECOVERY : 1263874aeea5SJeff Kirsher RESET_TYPE_DISABLE); 1264874aeea5SJeff Kirsher break; 1265874aeea5SJeff Kirsher case FSE_BZ_RX_DSC_ERROR_EV: 1266cd2d5b52SBen Hutchings if (ev_sub_data < EFX_VI_BASE) { 1267874aeea5SJeff Kirsher netif_err(efx, rx_err, efx->net_dev, 1268874aeea5SJeff Kirsher "RX DMA Q %d reports descriptor fetch error." 1269cd2d5b52SBen Hutchings " RX Q %d is disabled.\n", ev_sub_data, 1270cd2d5b52SBen Hutchings ev_sub_data); 1271874aeea5SJeff Kirsher efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 1272cd2d5b52SBen Hutchings } else 1273cd2d5b52SBen Hutchings efx_sriov_desc_fetch_err(efx, ev_sub_data); 1274874aeea5SJeff Kirsher break; 1275874aeea5SJeff Kirsher case FSE_BZ_TX_DSC_ERROR_EV: 1276cd2d5b52SBen Hutchings if (ev_sub_data < EFX_VI_BASE) { 1277874aeea5SJeff Kirsher netif_err(efx, tx_err, efx->net_dev, 1278874aeea5SJeff Kirsher "TX DMA Q %d reports descriptor fetch error." 1279cd2d5b52SBen Hutchings " TX Q %d is disabled.\n", ev_sub_data, 1280cd2d5b52SBen Hutchings ev_sub_data); 1281874aeea5SJeff Kirsher efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 1282cd2d5b52SBen Hutchings } else 1283cd2d5b52SBen Hutchings efx_sriov_desc_fetch_err(efx, ev_sub_data); 1284874aeea5SJeff Kirsher break; 1285874aeea5SJeff Kirsher default: 1286874aeea5SJeff Kirsher netif_vdbg(efx, hw, efx->net_dev, 1287874aeea5SJeff Kirsher "channel %d unknown driver event code %d " 1288874aeea5SJeff Kirsher "data %04x\n", channel->channel, ev_sub_code, 1289874aeea5SJeff Kirsher ev_sub_data); 1290874aeea5SJeff Kirsher break; 1291874aeea5SJeff Kirsher } 1292874aeea5SJeff Kirsher } 1293874aeea5SJeff Kirsher 1294874aeea5SJeff Kirsher int efx_nic_process_eventq(struct efx_channel *channel, int budget) 1295874aeea5SJeff Kirsher { 1296874aeea5SJeff Kirsher struct efx_nic *efx = channel->efx; 1297874aeea5SJeff Kirsher unsigned int read_ptr; 1298874aeea5SJeff Kirsher efx_qword_t event, *p_event; 1299874aeea5SJeff Kirsher int ev_code; 1300874aeea5SJeff Kirsher int tx_packets = 0; 1301874aeea5SJeff Kirsher int spent = 0; 1302874aeea5SJeff Kirsher 1303874aeea5SJeff Kirsher read_ptr = channel->eventq_read_ptr; 1304874aeea5SJeff Kirsher 1305874aeea5SJeff Kirsher for (;;) { 1306874aeea5SJeff Kirsher p_event = efx_event(channel, read_ptr); 1307874aeea5SJeff Kirsher event = *p_event; 1308874aeea5SJeff Kirsher 1309874aeea5SJeff Kirsher if (!efx_event_present(&event)) 1310874aeea5SJeff Kirsher /* End of events */ 1311874aeea5SJeff Kirsher break; 1312874aeea5SJeff Kirsher 1313874aeea5SJeff Kirsher netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1314874aeea5SJeff Kirsher "channel %d event is "EFX_QWORD_FMT"\n", 1315874aeea5SJeff Kirsher channel->channel, EFX_QWORD_VAL(event)); 1316874aeea5SJeff Kirsher 1317874aeea5SJeff Kirsher /* Clear this event by marking it all ones */ 1318874aeea5SJeff Kirsher EFX_SET_QWORD(*p_event); 1319874aeea5SJeff Kirsher 1320874aeea5SJeff Kirsher ++read_ptr; 1321874aeea5SJeff Kirsher 1322874aeea5SJeff Kirsher ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1323874aeea5SJeff Kirsher 1324874aeea5SJeff Kirsher switch (ev_code) { 1325874aeea5SJeff Kirsher case FSE_AZ_EV_CODE_RX_EV: 1326874aeea5SJeff Kirsher efx_handle_rx_event(channel, &event); 1327874aeea5SJeff Kirsher if (++spent == budget) 1328874aeea5SJeff Kirsher goto out; 1329874aeea5SJeff Kirsher break; 1330874aeea5SJeff Kirsher case FSE_AZ_EV_CODE_TX_EV: 1331874aeea5SJeff Kirsher tx_packets += efx_handle_tx_event(channel, &event); 1332874aeea5SJeff Kirsher if (tx_packets > 
efx->txq_entries) { 1333874aeea5SJeff Kirsher spent = budget; 1334874aeea5SJeff Kirsher goto out; 1335874aeea5SJeff Kirsher } 1336874aeea5SJeff Kirsher break; 1337874aeea5SJeff Kirsher case FSE_AZ_EV_CODE_DRV_GEN_EV: 1338874aeea5SJeff Kirsher efx_handle_generated_event(channel, &event); 1339874aeea5SJeff Kirsher break; 1340874aeea5SJeff Kirsher case FSE_AZ_EV_CODE_DRIVER_EV: 1341874aeea5SJeff Kirsher efx_handle_driver_event(channel, &event); 1342874aeea5SJeff Kirsher break; 1343cd2d5b52SBen Hutchings case FSE_CZ_EV_CODE_USER_EV: 1344cd2d5b52SBen Hutchings efx_sriov_event(channel, &event); 1345cd2d5b52SBen Hutchings break; 1346874aeea5SJeff Kirsher case FSE_CZ_EV_CODE_MCDI_EV: 1347874aeea5SJeff Kirsher efx_mcdi_process_event(channel, &event); 1348874aeea5SJeff Kirsher break; 1349874aeea5SJeff Kirsher case FSE_AZ_EV_CODE_GLOBAL_EV: 1350874aeea5SJeff Kirsher if (efx->type->handle_global_event && 1351874aeea5SJeff Kirsher efx->type->handle_global_event(channel, &event)) 1352874aeea5SJeff Kirsher break; 1353874aeea5SJeff Kirsher /* else fall through */ 1354874aeea5SJeff Kirsher default: 1355874aeea5SJeff Kirsher netif_err(channel->efx, hw, channel->efx->net_dev, 1356874aeea5SJeff Kirsher "channel %d unknown event type %d (data " 1357874aeea5SJeff Kirsher EFX_QWORD_FMT ")\n", channel->channel, 1358874aeea5SJeff Kirsher ev_code, EFX_QWORD_VAL(event)); 1359874aeea5SJeff Kirsher } 1360874aeea5SJeff Kirsher } 1361874aeea5SJeff Kirsher 1362874aeea5SJeff Kirsher out: 1363874aeea5SJeff Kirsher channel->eventq_read_ptr = read_ptr; 1364874aeea5SJeff Kirsher return spent; 1365874aeea5SJeff Kirsher } 1366874aeea5SJeff Kirsher 1367874aeea5SJeff Kirsher /* Check whether an event is present in the eventq at the current 1368874aeea5SJeff Kirsher * read pointer. Only useful for self-test. 
1369874aeea5SJeff Kirsher */
1370874aeea5SJeff Kirsher bool efx_nic_event_present(struct efx_channel *channel)
1371874aeea5SJeff Kirsher {
1372874aeea5SJeff Kirsher return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1373874aeea5SJeff Kirsher }
1374874aeea5SJeff Kirsher
1375874aeea5SJeff Kirsher /* Allocate buffer table entries for event queue */
1376874aeea5SJeff Kirsher int efx_nic_probe_eventq(struct efx_channel *channel)
1377874aeea5SJeff Kirsher {
1378874aeea5SJeff Kirsher struct efx_nic *efx = channel->efx;
1379874aeea5SJeff Kirsher unsigned entries;
1380874aeea5SJeff Kirsher
1381874aeea5SJeff Kirsher entries = channel->eventq_mask + 1;
1382874aeea5SJeff Kirsher return efx_alloc_special_buffer(efx, &channel->eventq,
1383874aeea5SJeff Kirsher entries * sizeof(efx_qword_t));
1384874aeea5SJeff Kirsher }
1385874aeea5SJeff Kirsher
1386874aeea5SJeff Kirsher void efx_nic_init_eventq(struct efx_channel *channel)
1387874aeea5SJeff Kirsher {
1388874aeea5SJeff Kirsher efx_oword_t reg;
1389874aeea5SJeff Kirsher struct efx_nic *efx = channel->efx;
1390874aeea5SJeff Kirsher
1391874aeea5SJeff Kirsher netif_dbg(efx, hw, efx->net_dev,
1392874aeea5SJeff Kirsher "channel %d event queue in special buffers %d-%d\n",
1393874aeea5SJeff Kirsher channel->channel, channel->eventq.index,
1394874aeea5SJeff Kirsher channel->eventq.index + channel->eventq.entries - 1);
1395874aeea5SJeff Kirsher
1396874aeea5SJeff Kirsher if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1397874aeea5SJeff Kirsher EFX_POPULATE_OWORD_3(reg,
1398874aeea5SJeff Kirsher FRF_CZ_TIMER_Q_EN, 1,
1399874aeea5SJeff Kirsher FRF_CZ_HOST_NOTIFY_MODE, 0,
1400874aeea5SJeff Kirsher FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1401874aeea5SJeff Kirsher efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1402874aeea5SJeff Kirsher }
1403874aeea5SJeff Kirsher
1404874aeea5SJeff Kirsher /* Pin event queue buffer */
1405874aeea5SJeff Kirsher efx_init_special_buffer(efx, &channel->eventq);
1406874aeea5SJeff Kirsher
1407874aeea5SJeff Kirsher /* Fill event queue with all ones (i.e. empty events) */
1408874aeea5SJeff Kirsher memset(channel->eventq.addr, 0xff, channel->eventq.len);
1409874aeea5SJeff Kirsher
1410874aeea5SJeff Kirsher /* Push event queue to card */
1411874aeea5SJeff Kirsher EFX_POPULATE_OWORD_3(reg,
1412874aeea5SJeff Kirsher FRF_AZ_EVQ_EN, 1,
1413874aeea5SJeff Kirsher FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1414874aeea5SJeff Kirsher FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1415874aeea5SJeff Kirsher efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1416874aeea5SJeff Kirsher channel->channel);
1417874aeea5SJeff Kirsher
1418874aeea5SJeff Kirsher efx->type->push_irq_moderation(channel);
1419874aeea5SJeff Kirsher }
1420874aeea5SJeff Kirsher
1421874aeea5SJeff Kirsher void efx_nic_fini_eventq(struct efx_channel *channel)
1422874aeea5SJeff Kirsher {
1423874aeea5SJeff Kirsher efx_oword_t reg;
1424874aeea5SJeff Kirsher struct efx_nic *efx = channel->efx;
1425874aeea5SJeff Kirsher
1426874aeea5SJeff Kirsher /* Remove event queue from card */
1427874aeea5SJeff Kirsher EFX_ZERO_OWORD(reg);
1428874aeea5SJeff Kirsher efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1429874aeea5SJeff Kirsher channel->channel);
1430874aeea5SJeff Kirsher if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1431874aeea5SJeff Kirsher efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1432874aeea5SJeff Kirsher
1433874aeea5SJeff Kirsher /* Unpin event queue */
1434874aeea5SJeff Kirsher efx_fini_special_buffer(efx, &channel->eventq);
1435874aeea5SJeff Kirsher }
1436874aeea5SJeff Kirsher
1437874aeea5SJeff Kirsher /* Free buffers backing event queue */
1438874aeea5SJeff Kirsher void efx_nic_remove_eventq(struct efx_channel *channel)
1439874aeea5SJeff Kirsher {
1440874aeea5SJeff Kirsher efx_free_special_buffer(channel->efx, &channel->eventq);
1441874aeea5SJeff Kirsher }
1442874aeea5SJeff Kirsher
1443874aeea5SJeff Kirsher
1444eee6f6a9SBen Hutchings void efx_nic_event_test_start(struct efx_channel *channel)
1445874aeea5SJeff Kirsher {
1446dd40781eSBen Hutchings channel->event_test_cpu = -1;
1447eee6f6a9SBen Hutchings smp_wmb();
14484ef594ebSBen Hutchings efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
1449874aeea5SJeff Kirsher }
1450874aeea5SJeff Kirsher
14512ae75dacSBen Hutchings void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
1452874aeea5SJeff Kirsher {
14532ae75dacSBen Hutchings efx_magic_event(efx_rx_queue_channel(rx_queue),
14542ae75dacSBen Hutchings EFX_CHANNEL_MAGIC_FILL(rx_queue));
1455874aeea5SJeff Kirsher }
1456874aeea5SJeff Kirsher
1457874aeea5SJeff Kirsher /**************************************************************************
1458874aeea5SJeff Kirsher *
1459874aeea5SJeff Kirsher * Hardware interrupts
1460874aeea5SJeff Kirsher * The hardware interrupt handler does very little work; all the event
1461874aeea5SJeff Kirsher * queue processing is carried out by per-channel tasklets.
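 * (Illustrative note, not in the original comment: of the two handlers
 * below, efx_legacy_interrupt() reads the INT_ISR0 bitmap and schedules
 * every channel whose bit is set, while efx_msi_interrupt() already knows
 * its channel from dev_id and schedules just that one; both defer the
 * real work to efx_schedule_channel_irq().)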
1462874aeea5SJeff Kirsher * 1463874aeea5SJeff Kirsher **************************************************************************/ 1464874aeea5SJeff Kirsher 1465874aeea5SJeff Kirsher /* Enable/disable/generate interrupts */ 1466874aeea5SJeff Kirsher static inline void efx_nic_interrupts(struct efx_nic *efx, 1467874aeea5SJeff Kirsher bool enabled, bool force) 1468874aeea5SJeff Kirsher { 1469874aeea5SJeff Kirsher efx_oword_t int_en_reg_ker; 1470874aeea5SJeff Kirsher 1471874aeea5SJeff Kirsher EFX_POPULATE_OWORD_3(int_en_reg_ker, 14721646a6f3SBen Hutchings FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level, 1473874aeea5SJeff Kirsher FRF_AZ_KER_INT_KER, force, 1474874aeea5SJeff Kirsher FRF_AZ_DRV_INT_EN_KER, enabled); 1475874aeea5SJeff Kirsher efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1476874aeea5SJeff Kirsher } 1477874aeea5SJeff Kirsher 1478874aeea5SJeff Kirsher void efx_nic_enable_interrupts(struct efx_nic *efx) 1479874aeea5SJeff Kirsher { 1480874aeea5SJeff Kirsher EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); 1481874aeea5SJeff Kirsher wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ 1482874aeea5SJeff Kirsher 1483874aeea5SJeff Kirsher efx_nic_interrupts(efx, true, false); 1484874aeea5SJeff Kirsher } 1485874aeea5SJeff Kirsher 1486874aeea5SJeff Kirsher void efx_nic_disable_interrupts(struct efx_nic *efx) 1487874aeea5SJeff Kirsher { 1488874aeea5SJeff Kirsher /* Disable interrupts */ 1489874aeea5SJeff Kirsher efx_nic_interrupts(efx, false, false); 1490874aeea5SJeff Kirsher } 1491874aeea5SJeff Kirsher 1492874aeea5SJeff Kirsher /* Generate a test interrupt 1493874aeea5SJeff Kirsher * Interrupt must already have been enabled, otherwise nasty things 1494874aeea5SJeff Kirsher * may happen. 1495874aeea5SJeff Kirsher */ 1496eee6f6a9SBen Hutchings void efx_nic_irq_test_start(struct efx_nic *efx) 1497874aeea5SJeff Kirsher { 1498eee6f6a9SBen Hutchings efx->last_irq_cpu = -1; 1499eee6f6a9SBen Hutchings smp_wmb(); 1500874aeea5SJeff Kirsher efx_nic_interrupts(efx, true, true); 1501874aeea5SJeff Kirsher } 1502874aeea5SJeff Kirsher 1503874aeea5SJeff Kirsher /* Process a fatal interrupt 1504874aeea5SJeff Kirsher * Disable bus mastering ASAP and schedule a reset 1505874aeea5SJeff Kirsher */ 1506874aeea5SJeff Kirsher irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) 1507874aeea5SJeff Kirsher { 1508874aeea5SJeff Kirsher struct falcon_nic_data *nic_data = efx->nic_data; 1509874aeea5SJeff Kirsher efx_oword_t *int_ker = efx->irq_status.addr; 1510874aeea5SJeff Kirsher efx_oword_t fatal_intr; 1511874aeea5SJeff Kirsher int error, mem_perr; 1512874aeea5SJeff Kirsher 1513874aeea5SJeff Kirsher efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); 1514874aeea5SJeff Kirsher error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); 1515874aeea5SJeff Kirsher 1516874aeea5SJeff Kirsher netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " 1517874aeea5SJeff Kirsher EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1518874aeea5SJeff Kirsher EFX_OWORD_VAL(fatal_intr), 1519874aeea5SJeff Kirsher error ? 
"disabling bus mastering" : "no recognised error"); 1520874aeea5SJeff Kirsher 1521874aeea5SJeff Kirsher /* If this is a memory parity error dump which blocks are offending */ 1522874aeea5SJeff Kirsher mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1523874aeea5SJeff Kirsher EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1524874aeea5SJeff Kirsher if (mem_perr) { 1525874aeea5SJeff Kirsher efx_oword_t reg; 1526874aeea5SJeff Kirsher efx_reado(efx, ®, FR_AZ_MEM_STAT); 1527874aeea5SJeff Kirsher netif_err(efx, hw, efx->net_dev, 1528874aeea5SJeff Kirsher "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1529874aeea5SJeff Kirsher EFX_OWORD_VAL(reg)); 1530874aeea5SJeff Kirsher } 1531874aeea5SJeff Kirsher 1532874aeea5SJeff Kirsher /* Disable both devices */ 1533874aeea5SJeff Kirsher pci_clear_master(efx->pci_dev); 1534874aeea5SJeff Kirsher if (efx_nic_is_dual_func(efx)) 1535874aeea5SJeff Kirsher pci_clear_master(nic_data->pci_dev2); 1536874aeea5SJeff Kirsher efx_nic_disable_interrupts(efx); 1537874aeea5SJeff Kirsher 1538874aeea5SJeff Kirsher /* Count errors and reset or disable the NIC accordingly */ 1539874aeea5SJeff Kirsher if (efx->int_error_count == 0 || 1540874aeea5SJeff Kirsher time_after(jiffies, efx->int_error_expire)) { 1541874aeea5SJeff Kirsher efx->int_error_count = 0; 1542874aeea5SJeff Kirsher efx->int_error_expire = 1543874aeea5SJeff Kirsher jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1544874aeea5SJeff Kirsher } 1545874aeea5SJeff Kirsher if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1546874aeea5SJeff Kirsher netif_err(efx, hw, efx->net_dev, 1547874aeea5SJeff Kirsher "SYSTEM ERROR - reset scheduled\n"); 1548874aeea5SJeff Kirsher efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1549874aeea5SJeff Kirsher } else { 1550874aeea5SJeff Kirsher netif_err(efx, hw, efx->net_dev, 1551874aeea5SJeff Kirsher "SYSTEM ERROR - max number of errors seen." 1552874aeea5SJeff Kirsher "NIC will be disabled\n"); 1553874aeea5SJeff Kirsher efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1554874aeea5SJeff Kirsher } 1555874aeea5SJeff Kirsher 1556874aeea5SJeff Kirsher return IRQ_HANDLED; 1557874aeea5SJeff Kirsher } 1558874aeea5SJeff Kirsher 1559874aeea5SJeff Kirsher /* Handle a legacy interrupt 1560874aeea5SJeff Kirsher * Acknowledges the interrupt and schedule event queue processing. 1561874aeea5SJeff Kirsher */ 1562874aeea5SJeff Kirsher static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) 1563874aeea5SJeff Kirsher { 1564874aeea5SJeff Kirsher struct efx_nic *efx = dev_id; 1565874aeea5SJeff Kirsher efx_oword_t *int_ker = efx->irq_status.addr; 1566874aeea5SJeff Kirsher irqreturn_t result = IRQ_NONE; 1567874aeea5SJeff Kirsher struct efx_channel *channel; 1568874aeea5SJeff Kirsher efx_dword_t reg; 1569874aeea5SJeff Kirsher u32 queues; 1570874aeea5SJeff Kirsher int syserr; 1571874aeea5SJeff Kirsher 1572874aeea5SJeff Kirsher /* Could this be ours? If interrupts are disabled then the 1573874aeea5SJeff Kirsher * channel state may not be valid. 1574874aeea5SJeff Kirsher */ 1575874aeea5SJeff Kirsher if (!efx->legacy_irq_enabled) 1576874aeea5SJeff Kirsher return result; 1577874aeea5SJeff Kirsher 1578874aeea5SJeff Kirsher /* Read the ISR which also ACKs the interrupts */ 1579874aeea5SJeff Kirsher efx_readd(efx, ®, FR_BZ_INT_ISR0); 1580874aeea5SJeff Kirsher queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1581874aeea5SJeff Kirsher 1582b28405b0SAlexandre Rames /* Legacy interrupts are disabled too late by the EEH kernel 1583b28405b0SAlexandre Rames * code. Disable them earlier. 
1584b28405b0SAlexandre Rames * If an EEH error occurred, the read will have returned all ones. 1585b28405b0SAlexandre Rames */ 1586b28405b0SAlexandre Rames if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && 1587b28405b0SAlexandre Rames !efx->eeh_disabled_legacy_irq) { 1588b28405b0SAlexandre Rames disable_irq_nosync(efx->legacy_irq); 1589b28405b0SAlexandre Rames efx->eeh_disabled_legacy_irq = true; 1590b28405b0SAlexandre Rames } 1591b28405b0SAlexandre Rames 15921646a6f3SBen Hutchings /* Handle non-event-queue sources */ 15931646a6f3SBen Hutchings if (queues & (1U << efx->irq_level)) { 1594874aeea5SJeff Kirsher syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1595874aeea5SJeff Kirsher if (unlikely(syserr)) 1596874aeea5SJeff Kirsher return efx_nic_fatal_interrupt(efx); 15971646a6f3SBen Hutchings efx->last_irq_cpu = raw_smp_processor_id(); 1598874aeea5SJeff Kirsher } 1599874aeea5SJeff Kirsher 1600874aeea5SJeff Kirsher if (queues != 0) { 1601874aeea5SJeff Kirsher if (EFX_WORKAROUND_15783(efx)) 1602874aeea5SJeff Kirsher efx->irq_zero_count = 0; 1603874aeea5SJeff Kirsher 1604874aeea5SJeff Kirsher /* Schedule processing of any interrupting queues */ 1605874aeea5SJeff Kirsher efx_for_each_channel(channel, efx) { 1606874aeea5SJeff Kirsher if (queues & 1) 16071646a6f3SBen Hutchings efx_schedule_channel_irq(channel); 1608874aeea5SJeff Kirsher queues >>= 1; 1609874aeea5SJeff Kirsher } 1610874aeea5SJeff Kirsher result = IRQ_HANDLED; 1611874aeea5SJeff Kirsher 1612874aeea5SJeff Kirsher } else if (EFX_WORKAROUND_15783(efx)) { 1613874aeea5SJeff Kirsher efx_qword_t *event; 1614874aeea5SJeff Kirsher 1615874aeea5SJeff Kirsher /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1616874aeea5SJeff Kirsher * because this might be a shared interrupt. */ 1617874aeea5SJeff Kirsher if (efx->irq_zero_count++ == 0) 1618874aeea5SJeff Kirsher result = IRQ_HANDLED; 1619874aeea5SJeff Kirsher 1620874aeea5SJeff Kirsher /* Ensure we schedule or rearm all event queues */ 1621874aeea5SJeff Kirsher efx_for_each_channel(channel, efx) { 1622874aeea5SJeff Kirsher event = efx_event(channel, channel->eventq_read_ptr); 1623874aeea5SJeff Kirsher if (efx_event_present(event)) 16241646a6f3SBen Hutchings efx_schedule_channel_irq(channel); 1625874aeea5SJeff Kirsher else 1626874aeea5SJeff Kirsher efx_nic_eventq_read_ack(channel); 1627874aeea5SJeff Kirsher } 1628874aeea5SJeff Kirsher } 1629874aeea5SJeff Kirsher 16301646a6f3SBen Hutchings if (result == IRQ_HANDLED) 1631874aeea5SJeff Kirsher netif_vdbg(efx, intr, efx->net_dev, 1632874aeea5SJeff Kirsher "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1633874aeea5SJeff Kirsher irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1634874aeea5SJeff Kirsher 1635874aeea5SJeff Kirsher return result; 1636874aeea5SJeff Kirsher } 1637874aeea5SJeff Kirsher 1638874aeea5SJeff Kirsher /* Handle an MSI interrupt 1639874aeea5SJeff Kirsher * 1640874aeea5SJeff Kirsher * Handle an MSI hardware interrupt. This routine schedules event 1641874aeea5SJeff Kirsher * queue processing. No interrupt acknowledgement cycle is necessary. 1642874aeea5SJeff Kirsher * Also, we never need to check that the interrupt is for us, since 1643874aeea5SJeff Kirsher * MSI interrupts cannot be shared. 
1644874aeea5SJeff Kirsher */ 1645874aeea5SJeff Kirsher static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) 1646874aeea5SJeff Kirsher { 1647874aeea5SJeff Kirsher struct efx_channel *channel = *(struct efx_channel **)dev_id; 1648874aeea5SJeff Kirsher struct efx_nic *efx = channel->efx; 1649874aeea5SJeff Kirsher efx_oword_t *int_ker = efx->irq_status.addr; 1650874aeea5SJeff Kirsher int syserr; 1651874aeea5SJeff Kirsher 1652874aeea5SJeff Kirsher netif_vdbg(efx, intr, efx->net_dev, 1653874aeea5SJeff Kirsher "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 1654874aeea5SJeff Kirsher irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1655874aeea5SJeff Kirsher 16561646a6f3SBen Hutchings /* Handle non-event-queue sources */ 16571646a6f3SBen Hutchings if (channel->channel == efx->irq_level) { 1658874aeea5SJeff Kirsher syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1659874aeea5SJeff Kirsher if (unlikely(syserr)) 1660874aeea5SJeff Kirsher return efx_nic_fatal_interrupt(efx); 16611646a6f3SBen Hutchings efx->last_irq_cpu = raw_smp_processor_id(); 1662874aeea5SJeff Kirsher } 1663874aeea5SJeff Kirsher 1664874aeea5SJeff Kirsher /* Schedule processing of the channel */ 16651646a6f3SBen Hutchings efx_schedule_channel_irq(channel); 1666874aeea5SJeff Kirsher 1667874aeea5SJeff Kirsher return IRQ_HANDLED; 1668874aeea5SJeff Kirsher } 1669874aeea5SJeff Kirsher 1670874aeea5SJeff Kirsher 1671874aeea5SJeff Kirsher /* Setup RSS indirection table. 1672874aeea5SJeff Kirsher * This maps from the hash value of the packet to RXQ 1673874aeea5SJeff Kirsher */ 1674874aeea5SJeff Kirsher void efx_nic_push_rx_indir_table(struct efx_nic *efx) 1675874aeea5SJeff Kirsher { 1676874aeea5SJeff Kirsher size_t i = 0; 1677874aeea5SJeff Kirsher efx_dword_t dword; 1678874aeea5SJeff Kirsher 1679874aeea5SJeff Kirsher if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) 1680874aeea5SJeff Kirsher return; 1681874aeea5SJeff Kirsher 1682874aeea5SJeff Kirsher BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != 1683874aeea5SJeff Kirsher FR_BZ_RX_INDIRECTION_TBL_ROWS); 1684874aeea5SJeff Kirsher 1685874aeea5SJeff Kirsher for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { 1686874aeea5SJeff Kirsher EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1687874aeea5SJeff Kirsher efx->rx_indir_table[i]); 1688778cdaf6SBen Hutchings efx_writed(efx, &dword, 1689778cdaf6SBen Hutchings FR_BZ_RX_INDIRECTION_TBL + 1690778cdaf6SBen Hutchings FR_BZ_RX_INDIRECTION_TBL_STEP * i); 1691874aeea5SJeff Kirsher } 1692874aeea5SJeff Kirsher } 1693874aeea5SJeff Kirsher 1694874aeea5SJeff Kirsher /* Hook interrupt handler(s) 1695874aeea5SJeff Kirsher * Try MSI and then legacy interrupts. 
1696874aeea5SJeff Kirsher */
1697874aeea5SJeff Kirsher int efx_nic_init_interrupt(struct efx_nic *efx)
1698874aeea5SJeff Kirsher {
1699874aeea5SJeff Kirsher struct efx_channel *channel;
1700874aeea5SJeff Kirsher int rc;
1701874aeea5SJeff Kirsher
1702874aeea5SJeff Kirsher if (!EFX_INT_MODE_USE_MSI(efx)) {
1703874aeea5SJeff Kirsher irq_handler_t handler;
1704874aeea5SJeff Kirsher if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1705874aeea5SJeff Kirsher handler = efx_legacy_interrupt;
1706874aeea5SJeff Kirsher else
1707874aeea5SJeff Kirsher handler = falcon_legacy_interrupt_a1;
1708874aeea5SJeff Kirsher
1709874aeea5SJeff Kirsher rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1710874aeea5SJeff Kirsher efx->name, efx);
1711874aeea5SJeff Kirsher if (rc) {
1712874aeea5SJeff Kirsher netif_err(efx, drv, efx->net_dev,
1713874aeea5SJeff Kirsher "failed to hook legacy IRQ %d\n",
1714874aeea5SJeff Kirsher efx->pci_dev->irq);
1715874aeea5SJeff Kirsher goto fail1;
1716874aeea5SJeff Kirsher }
1717874aeea5SJeff Kirsher return 0;
1718874aeea5SJeff Kirsher }
1719874aeea5SJeff Kirsher
1720874aeea5SJeff Kirsher /* Hook MSI or MSI-X interrupt */
1721874aeea5SJeff Kirsher efx_for_each_channel(channel, efx) {
1722874aeea5SJeff Kirsher rc = request_irq(channel->irq, efx_msi_interrupt,
1723874aeea5SJeff Kirsher IRQF_PROBE_SHARED, /* Not shared */
1724874aeea5SJeff Kirsher efx->channel_name[channel->channel],
1725874aeea5SJeff Kirsher &efx->channel[channel->channel]);
1726874aeea5SJeff Kirsher if (rc) {
1727874aeea5SJeff Kirsher netif_err(efx, drv, efx->net_dev,
1728874aeea5SJeff Kirsher "failed to hook IRQ %d\n", channel->irq);
1729874aeea5SJeff Kirsher goto fail2;
1730874aeea5SJeff Kirsher }
1731874aeea5SJeff Kirsher }
1732874aeea5SJeff Kirsher
1733874aeea5SJeff Kirsher return 0;
1734874aeea5SJeff Kirsher
1735874aeea5SJeff Kirsher fail2:
1736874aeea5SJeff Kirsher efx_for_each_channel(channel, efx)
1737874aeea5SJeff Kirsher free_irq(channel->irq, &efx->channel[channel->channel]);
1738874aeea5SJeff Kirsher fail1:
1739874aeea5SJeff Kirsher return rc;
1740874aeea5SJeff Kirsher }
1741874aeea5SJeff Kirsher
1742874aeea5SJeff Kirsher void efx_nic_fini_interrupt(struct efx_nic *efx)
1743874aeea5SJeff Kirsher {
1744874aeea5SJeff Kirsher struct efx_channel *channel;
1745874aeea5SJeff Kirsher efx_oword_t reg;
1746874aeea5SJeff Kirsher
1747874aeea5SJeff Kirsher /* Disable MSI/MSI-X interrupts */
1748874aeea5SJeff Kirsher efx_for_each_channel(channel, efx) {
1749874aeea5SJeff Kirsher if (channel->irq)
1750874aeea5SJeff Kirsher free_irq(channel->irq, &efx->channel[channel->channel]);
1751874aeea5SJeff Kirsher }
1752874aeea5SJeff Kirsher
1753874aeea5SJeff Kirsher /* ACK legacy interrupt */
1754874aeea5SJeff Kirsher if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1755874aeea5SJeff Kirsher efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1756874aeea5SJeff Kirsher else
1757874aeea5SJeff Kirsher falcon_irq_ack_a1(efx);
1758874aeea5SJeff Kirsher
1759874aeea5SJeff Kirsher /* Disable legacy interrupt */
1760874aeea5SJeff Kirsher if (efx->legacy_irq)
1761874aeea5SJeff Kirsher free_irq(efx->legacy_irq, efx);
1762874aeea5SJeff Kirsher }
1763874aeea5SJeff Kirsher
1764cd2d5b52SBen Hutchings /* Looks at available SRAM resources and works out how many queues we
1765cd2d5b52SBen Hutchings * can support, and where things like descriptor caches should live.
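 * (Illustrative example, not from the original comment; all numbers are
 * hypothetical: with 4 channels, each having one RX queue and EFX_TXQ_TYPES == 4
 * TX queues, EFX_MAX_DMAQ_SIZE == 4096, EFX_MAX_EVQ_SIZE == 32768 and
 * EFX_BUF_SIZE == 4096, the buftbl_min computed below works out to
 * (4*4096 + 4*4*4096 + 4*32768) * 8 / 4096 = 416 buffer table entries.)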
1766cd2d5b52SBen Hutchings *
1767cd2d5b52SBen Hutchings * SRAM is split up as follows:
1768cd2d5b52SBen Hutchings * 0 buftbl entries for channels
1769cd2d5b52SBen Hutchings * efx->vf_buftbl_base buftbl entries for SR-IOV
1770cd2d5b52SBen Hutchings * efx->rx_dc_base RX descriptor caches
1771cd2d5b52SBen Hutchings * efx->tx_dc_base TX descriptor caches
1772cd2d5b52SBen Hutchings */
177328e47c49SBen Hutchings void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
177428e47c49SBen Hutchings {
177528e47c49SBen Hutchings unsigned vi_count, buftbl_min;
177628e47c49SBen Hutchings
177728e47c49SBen Hutchings /* Account for the buffer table entries backing the datapath channels
177828e47c49SBen Hutchings * and the descriptor caches for those channels.
177928e47c49SBen Hutchings */
178028e47c49SBen Hutchings buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
178128e47c49SBen Hutchings efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
178228e47c49SBen Hutchings efx->n_channels * EFX_MAX_EVQ_SIZE)
178328e47c49SBen Hutchings * sizeof(efx_qword_t) / EFX_BUF_SIZE);
178428e47c49SBen Hutchings vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
178528e47c49SBen Hutchings
1786cd2d5b52SBen Hutchings #ifdef CONFIG_SFC_SRIOV
1787cd2d5b52SBen Hutchings if (efx_sriov_wanted(efx)) {
1788cd2d5b52SBen Hutchings unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
1789cd2d5b52SBen Hutchings
1790cd2d5b52SBen Hutchings efx->vf_buftbl_base = buftbl_min;
1791cd2d5b52SBen Hutchings
1792cd2d5b52SBen Hutchings vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
1793cd2d5b52SBen Hutchings vi_count = max(vi_count, EFX_VI_BASE);
1794cd2d5b52SBen Hutchings buftbl_free = (sram_lim_qw - buftbl_min -
1795cd2d5b52SBen Hutchings vi_count * vi_dc_entries);
1796cd2d5b52SBen Hutchings
1797cd2d5b52SBen Hutchings entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
1798cd2d5b52SBen Hutchings efx_vf_size(efx));
1799cd2d5b52SBen Hutchings vf_limit = min(buftbl_free / entries_per_vf,
1800cd2d5b52SBen Hutchings (1024U - EFX_VI_BASE) >> efx->vi_scale);
1801cd2d5b52SBen Hutchings
1802cd2d5b52SBen Hutchings if (efx->vf_count > vf_limit) {
1803cd2d5b52SBen Hutchings netif_err(efx, probe, efx->net_dev,
1804cd2d5b52SBen Hutchings "Reducing VF count from %d to %d\n",
1805cd2d5b52SBen Hutchings efx->vf_count, vf_limit);
1806cd2d5b52SBen Hutchings efx->vf_count = vf_limit;
1807cd2d5b52SBen Hutchings }
1808cd2d5b52SBen Hutchings vi_count += efx->vf_count * efx_vf_size(efx);
1809cd2d5b52SBen Hutchings }
1810cd2d5b52SBen Hutchings #endif
1811cd2d5b52SBen Hutchings
181228e47c49SBen Hutchings efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
181328e47c49SBen Hutchings efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
181428e47c49SBen Hutchings }
181528e47c49SBen Hutchings
1816874aeea5SJeff Kirsher u32 efx_nic_fpga_ver(struct efx_nic *efx)
1817874aeea5SJeff Kirsher {
1818874aeea5SJeff Kirsher efx_oword_t altera_build;
1819874aeea5SJeff Kirsher efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1820874aeea5SJeff Kirsher return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1821874aeea5SJeff Kirsher }
1822874aeea5SJeff Kirsher
1823874aeea5SJeff Kirsher void efx_nic_init_common(struct efx_nic *efx)
1824874aeea5SJeff Kirsher {
1825874aeea5SJeff Kirsher efx_oword_t temp;
1826874aeea5SJeff Kirsher
1827874aeea5SJeff Kirsher /* Set positions of descriptor caches in SRAM.
*/ 182828e47c49SBen Hutchings EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base); 1829874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); 183028e47c49SBen Hutchings EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base); 1831874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); 1832874aeea5SJeff Kirsher 1833874aeea5SJeff Kirsher /* Set TX descriptor cache size. */ 1834874aeea5SJeff Kirsher BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); 1835874aeea5SJeff Kirsher EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 1836874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); 1837874aeea5SJeff Kirsher 1838874aeea5SJeff Kirsher /* Set RX descriptor cache size. Set low watermark to size-8, as 1839874aeea5SJeff Kirsher * this allows most efficient prefetching. 1840874aeea5SJeff Kirsher */ 1841874aeea5SJeff Kirsher BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); 1842874aeea5SJeff Kirsher EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 1843874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); 1844874aeea5SJeff Kirsher EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 1845874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); 1846874aeea5SJeff Kirsher 1847874aeea5SJeff Kirsher /* Program INT_KER address */ 1848874aeea5SJeff Kirsher EFX_POPULATE_OWORD_2(temp, 1849874aeea5SJeff Kirsher FRF_AZ_NORM_INT_VEC_DIS_KER, 1850874aeea5SJeff Kirsher EFX_INT_MODE_USE_MSI(efx), 1851874aeea5SJeff Kirsher FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1852874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1853874aeea5SJeff Kirsher 1854874aeea5SJeff Kirsher if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) 1855874aeea5SJeff Kirsher /* Use an interrupt level unused by event queues */ 18561646a6f3SBen Hutchings efx->irq_level = 0x1f; 1857874aeea5SJeff Kirsher else 1858874aeea5SJeff Kirsher /* Use a valid MSI-X vector */ 18591646a6f3SBen Hutchings efx->irq_level = 0; 1860874aeea5SJeff Kirsher 1861874aeea5SJeff Kirsher /* Enable all the genuinely fatal interrupts. (They are still 1862874aeea5SJeff Kirsher * masked by the overall interrupt mask, controlled by 1863874aeea5SJeff Kirsher * falcon_interrupts()). 1864874aeea5SJeff Kirsher * 1865874aeea5SJeff Kirsher * Note: All other fatal interrupts are enabled 1866874aeea5SJeff Kirsher */ 1867874aeea5SJeff Kirsher EFX_POPULATE_OWORD_3(temp, 1868874aeea5SJeff Kirsher FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1869874aeea5SJeff Kirsher FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1870874aeea5SJeff Kirsher FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1871874aeea5SJeff Kirsher if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) 1872874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); 1873874aeea5SJeff Kirsher EFX_INVERT_OWORD(temp); 1874874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1875874aeea5SJeff Kirsher 1876874aeea5SJeff Kirsher efx_nic_push_rx_indir_table(efx); 1877874aeea5SJeff Kirsher 1878874aeea5SJeff Kirsher /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 1879874aeea5SJeff Kirsher * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 
1880874aeea5SJeff Kirsher */ 1881874aeea5SJeff Kirsher efx_reado(efx, &temp, FR_AZ_TX_RESERVED); 1882874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1883874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1884874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1885874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); 1886874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1887874aeea5SJeff Kirsher /* Enable SW_EV to inherit in char driver - assume harmless here */ 1888874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1889874aeea5SJeff Kirsher /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1890874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1891874aeea5SJeff Kirsher /* Disable hardware watchdog which can misfire */ 1892874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1893874aeea5SJeff Kirsher /* Squash TX of packets of 16 bytes or less */ 1894874aeea5SJeff Kirsher if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1895874aeea5SJeff Kirsher EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 1896874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 1897874aeea5SJeff Kirsher 1898874aeea5SJeff Kirsher if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 1899874aeea5SJeff Kirsher EFX_POPULATE_OWORD_4(temp, 1900874aeea5SJeff Kirsher /* Default values */ 1901874aeea5SJeff Kirsher FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, 1902874aeea5SJeff Kirsher FRF_BZ_TX_PACE_SB_AF, 0xb, 1903874aeea5SJeff Kirsher FRF_BZ_TX_PACE_FB_BASE, 0, 1904874aeea5SJeff Kirsher /* Allow large pace values in the 1905874aeea5SJeff Kirsher * fast bin. */ 1906874aeea5SJeff Kirsher FRF_BZ_TX_PACE_BIN_TH, 1907874aeea5SJeff Kirsher FFE_BZ_TX_PACE_RESERVED); 1908874aeea5SJeff Kirsher efx_writeo(efx, &temp, FR_BZ_TX_PACE); 1909874aeea5SJeff Kirsher } 1910874aeea5SJeff Kirsher } 1911874aeea5SJeff Kirsher 1912874aeea5SJeff Kirsher /* Register dump */ 1913874aeea5SJeff Kirsher 1914874aeea5SJeff Kirsher #define REGISTER_REVISION_A 1 1915874aeea5SJeff Kirsher #define REGISTER_REVISION_B 2 1916874aeea5SJeff Kirsher #define REGISTER_REVISION_C 3 1917874aeea5SJeff Kirsher #define REGISTER_REVISION_Z 3 /* latest revision */ 1918874aeea5SJeff Kirsher 1919874aeea5SJeff Kirsher struct efx_nic_reg { 1920874aeea5SJeff Kirsher u32 offset:24; 1921874aeea5SJeff Kirsher u32 min_revision:2, max_revision:2; 1922874aeea5SJeff Kirsher }; 1923874aeea5SJeff Kirsher 1924874aeea5SJeff Kirsher #define REGISTER(name, min_rev, max_rev) { \ 1925874aeea5SJeff Kirsher FR_ ## min_rev ## max_rev ## _ ## name, \ 1926874aeea5SJeff Kirsher REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ 1927874aeea5SJeff Kirsher } 1928874aeea5SJeff Kirsher #define REGISTER_AA(name) REGISTER(name, A, A) 1929874aeea5SJeff Kirsher #define REGISTER_AB(name) REGISTER(name, A, B) 1930874aeea5SJeff Kirsher #define REGISTER_AZ(name) REGISTER(name, A, Z) 1931874aeea5SJeff Kirsher #define REGISTER_BB(name) REGISTER(name, B, B) 1932874aeea5SJeff Kirsher #define REGISTER_BZ(name) REGISTER(name, B, Z) 1933874aeea5SJeff Kirsher #define REGISTER_CZ(name) REGISTER(name, C, Z) 1934874aeea5SJeff Kirsher 1935874aeea5SJeff Kirsher static const struct efx_nic_reg efx_nic_regs[] = { 1936874aeea5SJeff Kirsher REGISTER_AZ(ADR_REGION), 1937874aeea5SJeff Kirsher REGISTER_AZ(INT_EN_KER), 1938874aeea5SJeff Kirsher REGISTER_BZ(INT_EN_CHAR), 1939874aeea5SJeff Kirsher 
REGISTER_AZ(INT_ADR_KER), 1940874aeea5SJeff Kirsher REGISTER_BZ(INT_ADR_CHAR), 1941874aeea5SJeff Kirsher /* INT_ACK_KER is WO */ 1942874aeea5SJeff Kirsher /* INT_ISR0 is RC */ 1943874aeea5SJeff Kirsher REGISTER_AZ(HW_INIT), 1944874aeea5SJeff Kirsher REGISTER_CZ(USR_EV_CFG), 1945874aeea5SJeff Kirsher REGISTER_AB(EE_SPI_HCMD), 1946874aeea5SJeff Kirsher REGISTER_AB(EE_SPI_HADR), 1947874aeea5SJeff Kirsher REGISTER_AB(EE_SPI_HDATA), 1948874aeea5SJeff Kirsher REGISTER_AB(EE_BASE_PAGE), 1949874aeea5SJeff Kirsher REGISTER_AB(EE_VPD_CFG0), 1950874aeea5SJeff Kirsher /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ 1951874aeea5SJeff Kirsher /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ 1952874aeea5SJeff Kirsher /* PCIE_CORE_INDIRECT is indirect */ 1953874aeea5SJeff Kirsher REGISTER_AB(NIC_STAT), 1954874aeea5SJeff Kirsher REGISTER_AB(GPIO_CTL), 1955874aeea5SJeff Kirsher REGISTER_AB(GLB_CTL), 1956874aeea5SJeff Kirsher /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ 1957874aeea5SJeff Kirsher REGISTER_BZ(DP_CTRL), 1958874aeea5SJeff Kirsher REGISTER_AZ(MEM_STAT), 1959874aeea5SJeff Kirsher REGISTER_AZ(CS_DEBUG), 1960874aeea5SJeff Kirsher REGISTER_AZ(ALTERA_BUILD), 1961874aeea5SJeff Kirsher REGISTER_AZ(CSR_SPARE), 1962874aeea5SJeff Kirsher REGISTER_AB(PCIE_SD_CTL0123), 1963874aeea5SJeff Kirsher REGISTER_AB(PCIE_SD_CTL45), 1964874aeea5SJeff Kirsher REGISTER_AB(PCIE_PCS_CTL_STAT), 1965874aeea5SJeff Kirsher /* DEBUG_DATA_OUT is not used */ 1966874aeea5SJeff Kirsher /* DRV_EV is WO */ 1967874aeea5SJeff Kirsher REGISTER_AZ(EVQ_CTL), 1968874aeea5SJeff Kirsher REGISTER_AZ(EVQ_CNT1), 1969874aeea5SJeff Kirsher REGISTER_AZ(EVQ_CNT2), 1970874aeea5SJeff Kirsher REGISTER_AZ(BUF_TBL_CFG), 1971874aeea5SJeff Kirsher REGISTER_AZ(SRM_RX_DC_CFG), 1972874aeea5SJeff Kirsher REGISTER_AZ(SRM_TX_DC_CFG), 1973874aeea5SJeff Kirsher REGISTER_AZ(SRM_CFG), 1974874aeea5SJeff Kirsher /* BUF_TBL_UPD is WO */ 1975874aeea5SJeff Kirsher REGISTER_AZ(SRM_UPD_EVQ), 1976874aeea5SJeff Kirsher REGISTER_AZ(SRAM_PARITY), 1977874aeea5SJeff Kirsher REGISTER_AZ(RX_CFG), 1978874aeea5SJeff Kirsher REGISTER_BZ(RX_FILTER_CTL), 1979874aeea5SJeff Kirsher /* RX_FLUSH_DESCQ is WO */ 1980874aeea5SJeff Kirsher REGISTER_AZ(RX_DC_CFG), 1981874aeea5SJeff Kirsher REGISTER_AZ(RX_DC_PF_WM), 1982874aeea5SJeff Kirsher REGISTER_BZ(RX_RSS_TKEY), 1983874aeea5SJeff Kirsher /* RX_NODESC_DROP is RC */ 1984874aeea5SJeff Kirsher REGISTER_AA(RX_SELF_RST), 1985874aeea5SJeff Kirsher /* RX_DEBUG, RX_PUSH_DROP are not used */ 1986874aeea5SJeff Kirsher REGISTER_CZ(RX_RSS_IPV6_REG1), 1987874aeea5SJeff Kirsher REGISTER_CZ(RX_RSS_IPV6_REG2), 1988874aeea5SJeff Kirsher REGISTER_CZ(RX_RSS_IPV6_REG3), 1989874aeea5SJeff Kirsher /* TX_FLUSH_DESCQ is WO */ 1990874aeea5SJeff Kirsher REGISTER_AZ(TX_DC_CFG), 1991874aeea5SJeff Kirsher REGISTER_AA(TX_CHKSM_CFG), 1992874aeea5SJeff Kirsher REGISTER_AZ(TX_CFG), 1993874aeea5SJeff Kirsher /* TX_PUSH_DROP is not used */ 1994874aeea5SJeff Kirsher REGISTER_AZ(TX_RESERVED), 1995874aeea5SJeff Kirsher REGISTER_BZ(TX_PACE), 1996874aeea5SJeff Kirsher /* TX_PACE_DROP_QID is RC */ 1997874aeea5SJeff Kirsher REGISTER_BB(TX_VLAN), 1998874aeea5SJeff Kirsher REGISTER_BZ(TX_IPFIL_PORTEN), 1999874aeea5SJeff Kirsher REGISTER_AB(MD_TXD), 2000874aeea5SJeff Kirsher REGISTER_AB(MD_RXD), 2001874aeea5SJeff Kirsher REGISTER_AB(MD_CS), 2002874aeea5SJeff Kirsher REGISTER_AB(MD_PHY_ADR), 2003874aeea5SJeff Kirsher REGISTER_AB(MD_ID), 2004874aeea5SJeff Kirsher /* MD_STAT is RC */ 2005874aeea5SJeff Kirsher REGISTER_AB(MAC_STAT_DMA), 2006874aeea5SJeff Kirsher 
REGISTER_AB(MAC_CTRL), 2007874aeea5SJeff Kirsher REGISTER_BB(GEN_MODE), 2008874aeea5SJeff Kirsher REGISTER_AB(MAC_MC_HASH_REG0), 2009874aeea5SJeff Kirsher REGISTER_AB(MAC_MC_HASH_REG1), 2010874aeea5SJeff Kirsher REGISTER_AB(GM_CFG1), 2011874aeea5SJeff Kirsher REGISTER_AB(GM_CFG2), 2012874aeea5SJeff Kirsher /* GM_IPG and GM_HD are not used */ 2013874aeea5SJeff Kirsher REGISTER_AB(GM_MAX_FLEN), 2014874aeea5SJeff Kirsher /* GM_TEST is not used */ 2015874aeea5SJeff Kirsher REGISTER_AB(GM_ADR1), 2016874aeea5SJeff Kirsher REGISTER_AB(GM_ADR2), 2017874aeea5SJeff Kirsher REGISTER_AB(GMF_CFG0), 2018874aeea5SJeff Kirsher REGISTER_AB(GMF_CFG1), 2019874aeea5SJeff Kirsher REGISTER_AB(GMF_CFG2), 2020874aeea5SJeff Kirsher REGISTER_AB(GMF_CFG3), 2021874aeea5SJeff Kirsher REGISTER_AB(GMF_CFG4), 2022874aeea5SJeff Kirsher REGISTER_AB(GMF_CFG5), 2023874aeea5SJeff Kirsher REGISTER_BB(TX_SRC_MAC_CTL), 2024874aeea5SJeff Kirsher REGISTER_AB(XM_ADR_LO), 2025874aeea5SJeff Kirsher REGISTER_AB(XM_ADR_HI), 2026874aeea5SJeff Kirsher REGISTER_AB(XM_GLB_CFG), 2027874aeea5SJeff Kirsher REGISTER_AB(XM_TX_CFG), 2028874aeea5SJeff Kirsher REGISTER_AB(XM_RX_CFG), 2029874aeea5SJeff Kirsher REGISTER_AB(XM_MGT_INT_MASK), 2030874aeea5SJeff Kirsher REGISTER_AB(XM_FC), 2031874aeea5SJeff Kirsher REGISTER_AB(XM_PAUSE_TIME), 2032874aeea5SJeff Kirsher REGISTER_AB(XM_TX_PARAM), 2033874aeea5SJeff Kirsher REGISTER_AB(XM_RX_PARAM), 2034874aeea5SJeff Kirsher /* XM_MGT_INT_MSK (note no 'A') is RC */ 2035874aeea5SJeff Kirsher REGISTER_AB(XX_PWR_RST), 2036874aeea5SJeff Kirsher REGISTER_AB(XX_SD_CTL), 2037874aeea5SJeff Kirsher REGISTER_AB(XX_TXDRV_CTL), 2038874aeea5SJeff Kirsher /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ 2039874aeea5SJeff Kirsher /* XX_CORE_STAT is partly RC */ 2040874aeea5SJeff Kirsher }; 2041874aeea5SJeff Kirsher 2042874aeea5SJeff Kirsher struct efx_nic_reg_table { 2043874aeea5SJeff Kirsher u32 offset:24; 2044874aeea5SJeff Kirsher u32 min_revision:2, max_revision:2; 2045874aeea5SJeff Kirsher u32 step:6, rows:21; 2046874aeea5SJeff Kirsher }; 2047874aeea5SJeff Kirsher 2048874aeea5SJeff Kirsher #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ 2049874aeea5SJeff Kirsher offset, \ 2050874aeea5SJeff Kirsher REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ 2051874aeea5SJeff Kirsher step, rows \ 2052874aeea5SJeff Kirsher } 2053874aeea5SJeff Kirsher #define REGISTER_TABLE(name, min_rev, max_rev) \ 2054874aeea5SJeff Kirsher REGISTER_TABLE_DIMENSIONS( \ 2055874aeea5SJeff Kirsher name, FR_ ## min_rev ## max_rev ## _ ## name, \ 2056874aeea5SJeff Kirsher min_rev, max_rev, \ 2057874aeea5SJeff Kirsher FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ 2058874aeea5SJeff Kirsher FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) 2059874aeea5SJeff Kirsher #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) 2060874aeea5SJeff Kirsher #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) 2061874aeea5SJeff Kirsher #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) 2062874aeea5SJeff Kirsher #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) 2063874aeea5SJeff Kirsher #define REGISTER_TABLE_BB_CZ(name) \ 2064874aeea5SJeff Kirsher REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ 2065874aeea5SJeff Kirsher FR_BZ_ ## name ## _STEP, \ 2066874aeea5SJeff Kirsher FR_BB_ ## name ## _ROWS), \ 2067874aeea5SJeff Kirsher REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ 2068874aeea5SJeff Kirsher FR_BZ_ ## name ## _STEP, \ 2069874aeea5SJeff Kirsher FR_CZ_ ## name ## 
_ROWS) 2070874aeea5SJeff Kirsher #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) 2071874aeea5SJeff Kirsher 2072874aeea5SJeff Kirsher static const struct efx_nic_reg_table efx_nic_reg_tables[] = { 2073874aeea5SJeff Kirsher /* DRIVER is not used */ 2074874aeea5SJeff Kirsher /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ 2075874aeea5SJeff Kirsher REGISTER_TABLE_BB(TX_IPFIL_TBL), 2076874aeea5SJeff Kirsher REGISTER_TABLE_BB(TX_SRC_MAC_TBL), 2077874aeea5SJeff Kirsher REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), 2078874aeea5SJeff Kirsher REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), 2079874aeea5SJeff Kirsher REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), 2080874aeea5SJeff Kirsher REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), 2081874aeea5SJeff Kirsher REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), 2082874aeea5SJeff Kirsher REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), 2083874aeea5SJeff Kirsher /* We can't reasonably read all of the buffer table (up to 8MB!). 2084874aeea5SJeff Kirsher * However this driver will only use a few entries. Reading 2085874aeea5SJeff Kirsher * 1K entries allows for some expansion of queue count and 2086874aeea5SJeff Kirsher * size before we need to change the version. */ 2087874aeea5SJeff Kirsher REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, 2088874aeea5SJeff Kirsher A, A, 8, 1024), 2089874aeea5SJeff Kirsher REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, 2090874aeea5SJeff Kirsher B, Z, 8, 1024), 2091874aeea5SJeff Kirsher REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), 2092874aeea5SJeff Kirsher REGISTER_TABLE_BB_CZ(TIMER_TBL), 2093874aeea5SJeff Kirsher REGISTER_TABLE_BB_CZ(TX_PACE_TBL), 2094874aeea5SJeff Kirsher REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), 2095874aeea5SJeff Kirsher /* TX_FILTER_TBL0 is huge and not used by this driver */ 2096874aeea5SJeff Kirsher REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), 2097874aeea5SJeff Kirsher REGISTER_TABLE_CZ(MC_TREG_SMEM), 2098874aeea5SJeff Kirsher /* MSIX_PBA_TABLE is not mapped */ 2099874aeea5SJeff Kirsher /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ 2100874aeea5SJeff Kirsher REGISTER_TABLE_BZ(RX_FILTER_TBL0), 2101874aeea5SJeff Kirsher }; 2102874aeea5SJeff Kirsher 2103874aeea5SJeff Kirsher size_t efx_nic_get_regs_len(struct efx_nic *efx) 2104874aeea5SJeff Kirsher { 2105874aeea5SJeff Kirsher const struct efx_nic_reg *reg; 2106874aeea5SJeff Kirsher const struct efx_nic_reg_table *table; 2107874aeea5SJeff Kirsher size_t len = 0; 2108874aeea5SJeff Kirsher 2109874aeea5SJeff Kirsher for (reg = efx_nic_regs; 2110874aeea5SJeff Kirsher reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); 2111874aeea5SJeff Kirsher reg++) 2112874aeea5SJeff Kirsher if (efx->type->revision >= reg->min_revision && 2113874aeea5SJeff Kirsher efx->type->revision <= reg->max_revision) 2114874aeea5SJeff Kirsher len += sizeof(efx_oword_t); 2115874aeea5SJeff Kirsher 2116874aeea5SJeff Kirsher for (table = efx_nic_reg_tables; 2117874aeea5SJeff Kirsher table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); 2118874aeea5SJeff Kirsher table++) 2119874aeea5SJeff Kirsher if (efx->type->revision >= table->min_revision && 2120874aeea5SJeff Kirsher efx->type->revision <= table->max_revision) 2121874aeea5SJeff Kirsher len += table->rows * min_t(size_t, table->step, 16); 2122874aeea5SJeff Kirsher 2123874aeea5SJeff Kirsher return len; 2124874aeea5SJeff Kirsher } 2125874aeea5SJeff Kirsher 2126874aeea5SJeff Kirsher void efx_nic_get_regs(struct efx_nic *efx, void *buf) 2127874aeea5SJeff Kirsher { 2128874aeea5SJeff Kirsher const struct efx_nic_reg *reg; 
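/* Illustrative note, not in the original source: the layout of 'buf' here
 * mirrors efx_nic_get_regs_len() above -- one 16-byte efx_oword_t per single
 * register that exists on this revision, followed by each table row packed
 * as min(step, 16) bytes, so for example an 8-byte-step SRAM table dumped
 * with 1024 rows contributes 8 KB to the register dump.
 */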
2129874aeea5SJeff Kirsher const struct efx_nic_reg_table *table; 2130874aeea5SJeff Kirsher 2131874aeea5SJeff Kirsher for (reg = efx_nic_regs; 2132874aeea5SJeff Kirsher reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); 2133874aeea5SJeff Kirsher reg++) { 2134874aeea5SJeff Kirsher if (efx->type->revision >= reg->min_revision && 2135874aeea5SJeff Kirsher efx->type->revision <= reg->max_revision) { 2136874aeea5SJeff Kirsher efx_reado(efx, (efx_oword_t *)buf, reg->offset); 2137874aeea5SJeff Kirsher buf += sizeof(efx_oword_t); 2138874aeea5SJeff Kirsher } 2139874aeea5SJeff Kirsher } 2140874aeea5SJeff Kirsher 2141874aeea5SJeff Kirsher for (table = efx_nic_reg_tables; 2142874aeea5SJeff Kirsher table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); 2143874aeea5SJeff Kirsher table++) { 2144874aeea5SJeff Kirsher size_t size, i; 2145874aeea5SJeff Kirsher 2146874aeea5SJeff Kirsher if (!(efx->type->revision >= table->min_revision && 2147874aeea5SJeff Kirsher efx->type->revision <= table->max_revision)) 2148874aeea5SJeff Kirsher continue; 2149874aeea5SJeff Kirsher 2150874aeea5SJeff Kirsher size = min_t(size_t, table->step, 16); 2151874aeea5SJeff Kirsher 2152874aeea5SJeff Kirsher for (i = 0; i < table->rows; i++) { 2153874aeea5SJeff Kirsher switch (table->step) { 2154778cdaf6SBen Hutchings case 4: /* 32-bit SRAM */ 2155778cdaf6SBen Hutchings efx_readd(efx, buf, table->offset + 4 * i); 2156874aeea5SJeff Kirsher break; 2157874aeea5SJeff Kirsher case 8: /* 64-bit SRAM */ 2158874aeea5SJeff Kirsher efx_sram_readq(efx, 2159874aeea5SJeff Kirsher efx->membase + table->offset, 2160874aeea5SJeff Kirsher buf, i); 2161874aeea5SJeff Kirsher break; 2162778cdaf6SBen Hutchings case 16: /* 128-bit-readable register */ 2163874aeea5SJeff Kirsher efx_reado_table(efx, buf, table->offset, i); 2164874aeea5SJeff Kirsher break; 2165874aeea5SJeff Kirsher case 32: /* 128-bit register, interleaved */ 2166874aeea5SJeff Kirsher efx_reado_table(efx, buf, table->offset, 2 * i); 2167874aeea5SJeff Kirsher break; 2168874aeea5SJeff Kirsher default: 2169874aeea5SJeff Kirsher WARN_ON(1); 2170874aeea5SJeff Kirsher return; 2171874aeea5SJeff Kirsher } 2172874aeea5SJeff Kirsher buf += size; 2173874aeea5SJeff Kirsher } 2174874aeea5SJeff Kirsher } 2175874aeea5SJeff Kirsher } 2176
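/* Illustrative sketch, not part of the driver: a minimal model of the event
 * ring convention used by efx_nic_init_eventq() and efx_nic_process_eventq()
 * above.  The queue is filled with 0xff bytes so an untouched slot reads back
 * as all ones, a consumed event is cleared by writing all ones over it, and
 * the read pointer is only masked when indexing rather than being wrapped
 * explicitly.  The real driver tests for a valid event with
 * efx_event_present(); this toy simply compares the whole word for brevity.
 * All toy_* names are invented for the example.
 *
 *	#define TOY_RING_SIZE	8	// power of two, like eventq_mask + 1
 *
 *	static u64 toy_ring[TOY_RING_SIZE];
 *	static unsigned int toy_read_ptr;
 *
 *	static void toy_init(void)
 *	{
 *		memset(toy_ring, 0xff, sizeof(toy_ring));	// all slots empty
 *	}
 *
 *	static bool toy_poll(u64 *out)
 *	{
 *		u64 *slot = &toy_ring[toy_read_ptr & (TOY_RING_SIZE - 1)];
 *
 *		if (*slot == ~0ULL)	// all ones: no new event
 *			return false;
 *		*out = *slot;		// consume the event ...
 *		*slot = ~0ULL;		// ... and clear it by marking all ones
 *		++toy_read_ptr;		// mask only when indexing
 *		return true;
 *	}
 */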