xref: /openbmc/linux/drivers/net/ethernet/sfc/nic.c (revision 874aeea5d01cac55c160a4e503e3ddb4db030de7)
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for events every EFX_FLUSH_INTERVAL ms, and check up to
 * EFX_FLUSH_POLL_COUNT times.
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)

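/* Worked example of the magic codes above (illustrative only): for
 * channel 3, EFX_CHANNEL_MAGIC_TEST() yields 0x00010103 and
 * EFX_CHANNEL_MAGIC_FILL() yields 0x00010203.  The channel number is
 * carried in the low bits and the 0x000101xx/0x000102xx prefixes
 * differ, so efx_handle_generated_event() can recover both the event
 * type and the originating channel from a single code.
 */
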
/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

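/* Note on the masking above: the event queue size is a power of two
 * and eventq_mask is (size - 1), so "index & eventq_mask" is a cheap
 * modulo.  For example, with a 512-entry queue the mask is 0x1ff and a
 * monotonically increasing index of 515 maps to slot 3; read pointers
 * can therefore be incremented without ever being wrapped explicitly.
 */
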
/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

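/* Illustration of the masked compare (not part of the driver): XOR
 * yields a 1 in every bit position where a and b differ, and ANDing
 * with the mask keeps only the bits we are allowed to test.  E.g.
 * a = 0xf0, b = 0xff, mask = 0x0f gives (a ^ b) & mask = 0x0f, a
 * mismatch; with mask = 0xf0 the same values compare equal.
 */
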
int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test that this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test that this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

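/* A caller-side sketch (hypothetical values, not from this file): the
 * NIC-specific code passes a table of {address, mask} pairs, where the
 * mask marks the bits that are safe to flip while the device is in
 * internal loopback, e.g.:
 *
 *	static const struct efx_nic_register_test my_register_tests[] = {
 *		{ FR_AZ_ADR_REGION,
 *		  EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
 *	};
 *	rc = efx_nic_test_registers(efx, my_register_tests,
 *				    ARRAY_SIZE(my_register_tests));
 *
 * Bits outside the mask (self-clearing command bits, read-only status)
 * are skipped by the sweep above.
 */
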
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

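/* Sizing example for the allocator above (illustrative): a 1024-entry
 * TX ring needs 1024 * sizeof(efx_qword_t) = 8192 bytes, which ALIGN()
 * leaves at 8192 and which therefore consumes two 4KB buffer-table
 * entries.  Each entry's FRF_AZ_BUF_ADR_FBUF field holds the 4KB page
 * number of the corresponding chunk, i.e. dma_addr >> 12.
 */
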
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to TX_DESC_WPTR, the write pointer for the TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

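/* How the check above works (a reading of the code, for reference):
 * the completion path records the read count when it observes the
 * queue empty, with EFX_EMPTY_COUNT_VALID set to distinguish "empty at
 * count 0" from "never recorded".  Pushing is allowed only when that
 * snapshot matches the write count with the valid flag masked off,
 * i.e. when the queue was still empty at the point this descriptor was
 * written, which is the only case where the push saves latency.
 */
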
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

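/* Note on the two doorbell paths above: efx_push_tx_desc() writes the
 * first new descriptor into the doorbell register itself, letting the
 * NIC start the transmit without first DMA-fetching the descriptor
 * from host memory; it is only worthwhile (and only counted in
 * tx_queue->pushes) when the queue was empty.  Otherwise the plain
 * write-pointer update in efx_notify_tx_desc() is used.
 */
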
/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

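/* The FRF_AZ_TX_DESCQ_SIZE field above is the log2 of the ring size:
 * __ffs() of a power-of-two entry count returns the index of its only
 * set bit, so a 512-entry ring is encoded as 9 and a 4096-entry ring
 * as 12.  (Ring sizes are always powers of two in this driver, which
 * is also what makes ptr_mask work as a modulo.)
 */
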
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

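/* Batching note for the function above: descriptors for every buffer
 * added since the last call are built here in one pass, then a single
 * write-pointer update covers the whole batch.  The wmb() orders the
 * descriptor writes before the doorbell so the NIC cannot fetch a
 * half-written descriptor.
 */
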
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

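/* A note on the ack (interpretation, not stated in this file): writing
 * the masked read pointer hands the consumed ring entries back to the
 * hardware.  The entries themselves were already overwritten with
 * all-ones by efx_nic_process_eventq(), so the queue can wrap and the
 * efx_event_present() test stays correct.
 */
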
/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

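/* Worked example of the completion arithmetic above (illustrative
 * numbers): with a 1024-entry ring (ptr_mask = 1023), read_count =
 * 1000 and a completion event carrying tx_ev_desc_ptr = 40 means
 * (40 - 1000) & 1023 = 64 descriptors completed, correctly handling
 * the wrap past the end of the ring.
 */
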
747*874aeea5SJeff Kirsher /* Detect errors included in the rx_evt_pkt_ok bit. */
748*874aeea5SJeff Kirsher static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
749*874aeea5SJeff Kirsher 				 const efx_qword_t *event,
750*874aeea5SJeff Kirsher 				 bool *rx_ev_pkt_ok,
751*874aeea5SJeff Kirsher 				 bool *discard)
752*874aeea5SJeff Kirsher {
753*874aeea5SJeff Kirsher 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
754*874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
755*874aeea5SJeff Kirsher 	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
756*874aeea5SJeff Kirsher 	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
757*874aeea5SJeff Kirsher 	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
758*874aeea5SJeff Kirsher 	bool rx_ev_other_err, rx_ev_pause_frm;
759*874aeea5SJeff Kirsher 	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
760*874aeea5SJeff Kirsher 	unsigned rx_ev_pkt_type;
761*874aeea5SJeff Kirsher 
762*874aeea5SJeff Kirsher 	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
763*874aeea5SJeff Kirsher 	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
764*874aeea5SJeff Kirsher 	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
765*874aeea5SJeff Kirsher 	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
766*874aeea5SJeff Kirsher 	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
767*874aeea5SJeff Kirsher 						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
768*874aeea5SJeff Kirsher 	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
769*874aeea5SJeff Kirsher 						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
770*874aeea5SJeff Kirsher 	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
771*874aeea5SJeff Kirsher 						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
772*874aeea5SJeff Kirsher 	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
773*874aeea5SJeff Kirsher 	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
774*874aeea5SJeff Kirsher 	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
775*874aeea5SJeff Kirsher 			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
776*874aeea5SJeff Kirsher 	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
777*874aeea5SJeff Kirsher 
778*874aeea5SJeff Kirsher 	/* Every error apart from tobe_disc and pause_frm */
779*874aeea5SJeff Kirsher 	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
780*874aeea5SJeff Kirsher 			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
781*874aeea5SJeff Kirsher 			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
782*874aeea5SJeff Kirsher 
783*874aeea5SJeff Kirsher 	/* Count errors that are not in MAC stats.  Ignore expected
784*874aeea5SJeff Kirsher 	 * checksum errors during self-test. */
785*874aeea5SJeff Kirsher 	if (rx_ev_frm_trunc)
786*874aeea5SJeff Kirsher 		++channel->n_rx_frm_trunc;
787*874aeea5SJeff Kirsher 	else if (rx_ev_tobe_disc)
788*874aeea5SJeff Kirsher 		++channel->n_rx_tobe_disc;
789*874aeea5SJeff Kirsher 	else if (!efx->loopback_selftest) {
790*874aeea5SJeff Kirsher 		if (rx_ev_ip_hdr_chksum_err)
791*874aeea5SJeff Kirsher 			++channel->n_rx_ip_hdr_chksum_err;
792*874aeea5SJeff Kirsher 		else if (rx_ev_tcp_udp_chksum_err)
793*874aeea5SJeff Kirsher 			++channel->n_rx_tcp_udp_chksum_err;
794*874aeea5SJeff Kirsher 	}
795*874aeea5SJeff Kirsher 
796*874aeea5SJeff Kirsher 	/* The frame must be discarded if any of these are true. */
797*874aeea5SJeff Kirsher 	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
798*874aeea5SJeff Kirsher 		    rx_ev_tobe_disc | rx_ev_pause_frm);
799*874aeea5SJeff Kirsher 
800*874aeea5SJeff Kirsher 	/* TOBE_DISC is expected on unicast mismatches; don't print out an
801*874aeea5SJeff Kirsher 	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
802*874aeea5SJeff Kirsher 	 * to a FIFO overflow.
803*874aeea5SJeff Kirsher 	 */
804*874aeea5SJeff Kirsher #ifdef EFX_ENABLE_DEBUG
805*874aeea5SJeff Kirsher 	if (rx_ev_other_err && net_ratelimit()) {
806*874aeea5SJeff Kirsher 		netif_dbg(efx, rx_err, efx->net_dev,
807*874aeea5SJeff Kirsher 			  " RX queue %d unexpected RX event "
808*874aeea5SJeff Kirsher 			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
809*874aeea5SJeff Kirsher 			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
810*874aeea5SJeff Kirsher 			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
811*874aeea5SJeff Kirsher 			  rx_ev_ip_hdr_chksum_err ?
812*874aeea5SJeff Kirsher 			  " [IP_HDR_CHKSUM_ERR]" : "",
813*874aeea5SJeff Kirsher 			  rx_ev_tcp_udp_chksum_err ?
814*874aeea5SJeff Kirsher 			  " [TCP_UDP_CHKSUM_ERR]" : "",
815*874aeea5SJeff Kirsher 			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
816*874aeea5SJeff Kirsher 			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
817*874aeea5SJeff Kirsher 			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
818*874aeea5SJeff Kirsher 			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
819*874aeea5SJeff Kirsher 			  rx_ev_pause_frm ? " [PAUSE]" : "");
820*874aeea5SJeff Kirsher 	}
821*874aeea5SJeff Kirsher #endif
822*874aeea5SJeff Kirsher }
823*874aeea5SJeff Kirsher 
824*874aeea5SJeff Kirsher /* Handle receive events that are not in-order. */
825*874aeea5SJeff Kirsher static void
826*874aeea5SJeff Kirsher efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
827*874aeea5SJeff Kirsher {
828*874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
829*874aeea5SJeff Kirsher 	unsigned expected, dropped;
830*874aeea5SJeff Kirsher 
831*874aeea5SJeff Kirsher 	expected = rx_queue->removed_count & rx_queue->ptr_mask;
832*874aeea5SJeff Kirsher 	dropped = (index - expected) & rx_queue->ptr_mask;
833*874aeea5SJeff Kirsher 	netif_info(efx, rx_err, efx->net_dev,
834*874aeea5SJeff Kirsher 		   "dropped %d events (index=%d expected=%d)\n",
835*874aeea5SJeff Kirsher 		   dropped, index, expected);
836*874aeea5SJeff Kirsher 
837*874aeea5SJeff Kirsher 	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
838*874aeea5SJeff Kirsher 			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
839*874aeea5SJeff Kirsher }
840*874aeea5SJeff Kirsher 
841*874aeea5SJeff Kirsher /* Handle a packet received event
842*874aeea5SJeff Kirsher  *
843*874aeea5SJeff Kirsher  * The NIC gives a "discard" flag if it's a unicast packet with the
844*874aeea5SJeff Kirsher  * wrong destination address
845*874aeea5SJeff Kirsher  * Also "is multicast" and "matches multicast filter" flags can be used to
846*874aeea5SJeff Kirsher  * discard non-matching multicast packets.
847*874aeea5SJeff Kirsher  */
848*874aeea5SJeff Kirsher static void
849*874aeea5SJeff Kirsher efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
850*874aeea5SJeff Kirsher {
851*874aeea5SJeff Kirsher 	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
852*874aeea5SJeff Kirsher 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
853*874aeea5SJeff Kirsher 	unsigned expected_ptr;
854*874aeea5SJeff Kirsher 	bool rx_ev_pkt_ok, discard = false, checksummed;
855*874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue;
856*874aeea5SJeff Kirsher 
857*874aeea5SJeff Kirsher 	/* Basic packet information */
858*874aeea5SJeff Kirsher 	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
859*874aeea5SJeff Kirsher 	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
860*874aeea5SJeff Kirsher 	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
861*874aeea5SJeff Kirsher 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
862*874aeea5SJeff Kirsher 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
863*874aeea5SJeff Kirsher 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
864*874aeea5SJeff Kirsher 		channel->channel);
865*874aeea5SJeff Kirsher 
866*874aeea5SJeff Kirsher 	rx_queue = efx_channel_get_rx_queue(channel);
867*874aeea5SJeff Kirsher 
868*874aeea5SJeff Kirsher 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
869*874aeea5SJeff Kirsher 	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
870*874aeea5SJeff Kirsher 	if (unlikely(rx_ev_desc_ptr != expected_ptr))
871*874aeea5SJeff Kirsher 		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
872*874aeea5SJeff Kirsher 
873*874aeea5SJeff Kirsher 	if (likely(rx_ev_pkt_ok)) {
874*874aeea5SJeff Kirsher 		/* If packet is marked as OK and packet type is TCP/IP or
875*874aeea5SJeff Kirsher 		 * UDP/IP, then we can rely on the hardware checksum.
876*874aeea5SJeff Kirsher 		 */
877*874aeea5SJeff Kirsher 		checksummed =
878*874aeea5SJeff Kirsher 			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
879*874aeea5SJeff Kirsher 			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
880*874aeea5SJeff Kirsher 	} else {
881*874aeea5SJeff Kirsher 		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
882*874aeea5SJeff Kirsher 		checksummed = false;
883*874aeea5SJeff Kirsher 	}
884*874aeea5SJeff Kirsher 
885*874aeea5SJeff Kirsher 	/* Detect multicast packets that didn't match the filter */
886*874aeea5SJeff Kirsher 	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
887*874aeea5SJeff Kirsher 	if (rx_ev_mcast_pkt) {
888*874aeea5SJeff Kirsher 		unsigned int rx_ev_mcast_hash_match =
889*874aeea5SJeff Kirsher 			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
890*874aeea5SJeff Kirsher 
891*874aeea5SJeff Kirsher 		if (unlikely(!rx_ev_mcast_hash_match)) {
892*874aeea5SJeff Kirsher 			++channel->n_rx_mcast_mismatch;
893*874aeea5SJeff Kirsher 			discard = true;
894*874aeea5SJeff Kirsher 		}
895*874aeea5SJeff Kirsher 	}
896*874aeea5SJeff Kirsher 
897*874aeea5SJeff Kirsher 	channel->irq_mod_score += 2;
898*874aeea5SJeff Kirsher 
899*874aeea5SJeff Kirsher 	/* Handle received packet */
900*874aeea5SJeff Kirsher 	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
901*874aeea5SJeff Kirsher 		      checksummed, discard);
902*874aeea5SJeff Kirsher }
903*874aeea5SJeff Kirsher 
904*874aeea5SJeff Kirsher static void
905*874aeea5SJeff Kirsher efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
906*874aeea5SJeff Kirsher {
907*874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
908*874aeea5SJeff Kirsher 	unsigned code;
909*874aeea5SJeff Kirsher 
910*874aeea5SJeff Kirsher 	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
911*874aeea5SJeff Kirsher 	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
912*874aeea5SJeff Kirsher 		; /* ignore */
913*874aeea5SJeff Kirsher 	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
914*874aeea5SJeff Kirsher 		/* The queue must be empty, so we won't receive any rx
915*874aeea5SJeff Kirsher 		 * events, so efx_process_channel() won't refill the
916*874aeea5SJeff Kirsher 		 * queue. Refill it here */
917*874aeea5SJeff Kirsher 		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
918*874aeea5SJeff Kirsher 	else
919*874aeea5SJeff Kirsher 		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
920*874aeea5SJeff Kirsher 			  "generated event "EFX_QWORD_FMT"\n",
921*874aeea5SJeff Kirsher 			  channel->channel, EFX_QWORD_VAL(*event));
922*874aeea5SJeff Kirsher }
923*874aeea5SJeff Kirsher 
924*874aeea5SJeff Kirsher static void
925*874aeea5SJeff Kirsher efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
926*874aeea5SJeff Kirsher {
927*874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
928*874aeea5SJeff Kirsher 	unsigned int ev_sub_code;
929*874aeea5SJeff Kirsher 	unsigned int ev_sub_data;
930*874aeea5SJeff Kirsher 
931*874aeea5SJeff Kirsher 	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
932*874aeea5SJeff Kirsher 	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
933*874aeea5SJeff Kirsher 
934*874aeea5SJeff Kirsher 	switch (ev_sub_code) {
935*874aeea5SJeff Kirsher 	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
936*874aeea5SJeff Kirsher 		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
937*874aeea5SJeff Kirsher 			   channel->channel, ev_sub_data);
938*874aeea5SJeff Kirsher 		break;
939*874aeea5SJeff Kirsher 	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
940*874aeea5SJeff Kirsher 		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
941*874aeea5SJeff Kirsher 			   channel->channel, ev_sub_data);
942*874aeea5SJeff Kirsher 		break;
943*874aeea5SJeff Kirsher 	case FSE_AZ_EVQ_INIT_DONE_EV:
944*874aeea5SJeff Kirsher 		netif_dbg(efx, hw, efx->net_dev,
945*874aeea5SJeff Kirsher 			  "channel %d EVQ %d initialised\n",
946*874aeea5SJeff Kirsher 			  channel->channel, ev_sub_data);
947*874aeea5SJeff Kirsher 		break;
948*874aeea5SJeff Kirsher 	case FSE_AZ_SRM_UPD_DONE_EV:
949*874aeea5SJeff Kirsher 		netif_vdbg(efx, hw, efx->net_dev,
950*874aeea5SJeff Kirsher 			   "channel %d SRAM update done\n", channel->channel);
951*874aeea5SJeff Kirsher 		break;
952*874aeea5SJeff Kirsher 	case FSE_AZ_WAKE_UP_EV:
953*874aeea5SJeff Kirsher 		netif_vdbg(efx, hw, efx->net_dev,
954*874aeea5SJeff Kirsher 			   "channel %d RXQ %d wakeup event\n",
955*874aeea5SJeff Kirsher 			   channel->channel, ev_sub_data);
956*874aeea5SJeff Kirsher 		break;
957*874aeea5SJeff Kirsher 	case FSE_AZ_TIMER_EV:
958*874aeea5SJeff Kirsher 		netif_vdbg(efx, hw, efx->net_dev,
959*874aeea5SJeff Kirsher 			   "channel %d RX queue %d timer expired\n",
960*874aeea5SJeff Kirsher 			   channel->channel, ev_sub_data);
961*874aeea5SJeff Kirsher 		break;
962*874aeea5SJeff Kirsher 	case FSE_AA_RX_RECOVER_EV:
963*874aeea5SJeff Kirsher 		netif_err(efx, rx_err, efx->net_dev,
964*874aeea5SJeff Kirsher 			  "channel %d seen DRIVER RX_RESET event. "
965*874aeea5SJeff Kirsher 			"Resetting.\n", channel->channel);
966*874aeea5SJeff Kirsher 		atomic_inc(&efx->rx_reset);
967*874aeea5SJeff Kirsher 		efx_schedule_reset(efx,
968*874aeea5SJeff Kirsher 				   EFX_WORKAROUND_6555(efx) ?
969*874aeea5SJeff Kirsher 				   RESET_TYPE_RX_RECOVERY :
970*874aeea5SJeff Kirsher 				   RESET_TYPE_DISABLE);
971*874aeea5SJeff Kirsher 		break;
972*874aeea5SJeff Kirsher 	case FSE_BZ_RX_DSC_ERROR_EV:
973*874aeea5SJeff Kirsher 		netif_err(efx, rx_err, efx->net_dev,
974*874aeea5SJeff Kirsher 			  "RX DMA Q %d reports descriptor fetch error."
975*874aeea5SJeff Kirsher 			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
976*874aeea5SJeff Kirsher 		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
977*874aeea5SJeff Kirsher 		break;
978*874aeea5SJeff Kirsher 	case FSE_BZ_TX_DSC_ERROR_EV:
979*874aeea5SJeff Kirsher 		netif_err(efx, tx_err, efx->net_dev,
980*874aeea5SJeff Kirsher 			  "TX DMA Q %d reports descriptor fetch error."
981*874aeea5SJeff Kirsher 			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
982*874aeea5SJeff Kirsher 		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
983*874aeea5SJeff Kirsher 		break;
984*874aeea5SJeff Kirsher 	default:
985*874aeea5SJeff Kirsher 		netif_vdbg(efx, hw, efx->net_dev,
986*874aeea5SJeff Kirsher 			   "channel %d unknown driver event code %d "
987*874aeea5SJeff Kirsher 			   "data %04x\n", channel->channel, ev_sub_code,
988*874aeea5SJeff Kirsher 			   ev_sub_data);
989*874aeea5SJeff Kirsher 		break;
990*874aeea5SJeff Kirsher 	}
991*874aeea5SJeff Kirsher }
992*874aeea5SJeff Kirsher 
993*874aeea5SJeff Kirsher int efx_nic_process_eventq(struct efx_channel *channel, int budget)
994*874aeea5SJeff Kirsher {
995*874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
996*874aeea5SJeff Kirsher 	unsigned int read_ptr;
997*874aeea5SJeff Kirsher 	efx_qword_t event, *p_event;
998*874aeea5SJeff Kirsher 	int ev_code;
999*874aeea5SJeff Kirsher 	int tx_packets = 0;
1000*874aeea5SJeff Kirsher 	int spent = 0;
1001*874aeea5SJeff Kirsher 
1002*874aeea5SJeff Kirsher 	read_ptr = channel->eventq_read_ptr;
1003*874aeea5SJeff Kirsher 
1004*874aeea5SJeff Kirsher 	for (;;) {
1005*874aeea5SJeff Kirsher 		p_event = efx_event(channel, read_ptr);
1006*874aeea5SJeff Kirsher 		event = *p_event;
1007*874aeea5SJeff Kirsher 
1008*874aeea5SJeff Kirsher 		if (!efx_event_present(&event))
1009*874aeea5SJeff Kirsher 			/* End of events */
1010*874aeea5SJeff Kirsher 			break;
1011*874aeea5SJeff Kirsher 
1012*874aeea5SJeff Kirsher 		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
1013*874aeea5SJeff Kirsher 			   "channel %d event is "EFX_QWORD_FMT"\n",
1014*874aeea5SJeff Kirsher 			   channel->channel, EFX_QWORD_VAL(event));
1015*874aeea5SJeff Kirsher 
1016*874aeea5SJeff Kirsher 		/* Clear this event by marking it all ones */
1017*874aeea5SJeff Kirsher 		EFX_SET_QWORD(*p_event);
1018*874aeea5SJeff Kirsher 
1019*874aeea5SJeff Kirsher 		++read_ptr;
1020*874aeea5SJeff Kirsher 
1021*874aeea5SJeff Kirsher 		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1022*874aeea5SJeff Kirsher 
1023*874aeea5SJeff Kirsher 		switch (ev_code) {
1024*874aeea5SJeff Kirsher 		case FSE_AZ_EV_CODE_RX_EV:
1025*874aeea5SJeff Kirsher 			efx_handle_rx_event(channel, &event);
1026*874aeea5SJeff Kirsher 			if (++spent == budget)
1027*874aeea5SJeff Kirsher 				goto out;
1028*874aeea5SJeff Kirsher 			break;
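		/* Cap TX work as well: once more completions than a whole
		 * TX queue's worth have been handled, report the budget as
		 * fully spent so that the poll routine reschedules us.
		 */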
1029*874aeea5SJeff Kirsher 		case FSE_AZ_EV_CODE_TX_EV:
1030*874aeea5SJeff Kirsher 			tx_packets += efx_handle_tx_event(channel, &event);
1031*874aeea5SJeff Kirsher 			if (tx_packets > efx->txq_entries) {
1032*874aeea5SJeff Kirsher 				spent = budget;
1033*874aeea5SJeff Kirsher 				goto out;
1034*874aeea5SJeff Kirsher 			}
1035*874aeea5SJeff Kirsher 			break;
1036*874aeea5SJeff Kirsher 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
1037*874aeea5SJeff Kirsher 			efx_handle_generated_event(channel, &event);
1038*874aeea5SJeff Kirsher 			break;
1039*874aeea5SJeff Kirsher 		case FSE_AZ_EV_CODE_DRIVER_EV:
1040*874aeea5SJeff Kirsher 			efx_handle_driver_event(channel, &event);
1041*874aeea5SJeff Kirsher 			break;
1042*874aeea5SJeff Kirsher 		case FSE_CZ_EV_CODE_MCDI_EV:
1043*874aeea5SJeff Kirsher 			efx_mcdi_process_event(channel, &event);
1044*874aeea5SJeff Kirsher 			break;
1045*874aeea5SJeff Kirsher 		case FSE_AZ_EV_CODE_GLOBAL_EV:
1046*874aeea5SJeff Kirsher 			if (efx->type->handle_global_event &&
1047*874aeea5SJeff Kirsher 			    efx->type->handle_global_event(channel, &event))
1048*874aeea5SJeff Kirsher 				break;
1049*874aeea5SJeff Kirsher 			/* else fall through */
1050*874aeea5SJeff Kirsher 		default:
1051*874aeea5SJeff Kirsher 			netif_err(channel->efx, hw, channel->efx->net_dev,
1052*874aeea5SJeff Kirsher 				  "channel %d unknown event type %d (data "
1053*874aeea5SJeff Kirsher 				  EFX_QWORD_FMT ")\n", channel->channel,
1054*874aeea5SJeff Kirsher 				  ev_code, EFX_QWORD_VAL(event));
1055*874aeea5SJeff Kirsher 		}
1056*874aeea5SJeff Kirsher 	}
1057*874aeea5SJeff Kirsher 
1058*874aeea5SJeff Kirsher out:
1059*874aeea5SJeff Kirsher 	channel->eventq_read_ptr = read_ptr;
1060*874aeea5SJeff Kirsher 	return spent;
1061*874aeea5SJeff Kirsher }
1062*874aeea5SJeff Kirsher 
1063*874aeea5SJeff Kirsher /* Check whether an event is present in the eventq at the current
1064*874aeea5SJeff Kirsher  * read pointer.  Only useful for self-test.
1065*874aeea5SJeff Kirsher  */
1066*874aeea5SJeff Kirsher bool efx_nic_event_present(struct efx_channel *channel)
1067*874aeea5SJeff Kirsher {
1068*874aeea5SJeff Kirsher 	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
1069*874aeea5SJeff Kirsher }
1070*874aeea5SJeff Kirsher 
1071*874aeea5SJeff Kirsher /* Allocate buffer table entries for event queue */
1072*874aeea5SJeff Kirsher int efx_nic_probe_eventq(struct efx_channel *channel)
1073*874aeea5SJeff Kirsher {
1074*874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
1075*874aeea5SJeff Kirsher 	unsigned entries;
1076*874aeea5SJeff Kirsher 
1077*874aeea5SJeff Kirsher 	entries = channel->eventq_mask + 1;
1078*874aeea5SJeff Kirsher 	return efx_alloc_special_buffer(efx, &channel->eventq,
1079*874aeea5SJeff Kirsher 					entries * sizeof(efx_qword_t));
1080*874aeea5SJeff Kirsher }
1081*874aeea5SJeff Kirsher 
1082*874aeea5SJeff Kirsher void efx_nic_init_eventq(struct efx_channel *channel)
1083*874aeea5SJeff Kirsher {
1084*874aeea5SJeff Kirsher 	efx_oword_t reg;
1085*874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
1086*874aeea5SJeff Kirsher 
1087*874aeea5SJeff Kirsher 	netif_dbg(efx, hw, efx->net_dev,
1088*874aeea5SJeff Kirsher 		  "channel %d event queue in special buffers %d-%d\n",
1089*874aeea5SJeff Kirsher 		  channel->channel, channel->eventq.index,
1090*874aeea5SJeff Kirsher 		  channel->eventq.index + channel->eventq.entries - 1);
1091*874aeea5SJeff Kirsher 
1092*874aeea5SJeff Kirsher 	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1093*874aeea5SJeff Kirsher 		EFX_POPULATE_OWORD_3(reg,
1094*874aeea5SJeff Kirsher 				     FRF_CZ_TIMER_Q_EN, 1,
1095*874aeea5SJeff Kirsher 				     FRF_CZ_HOST_NOTIFY_MODE, 0,
1096*874aeea5SJeff Kirsher 				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
1097*874aeea5SJeff Kirsher 		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1098*874aeea5SJeff Kirsher 	}
1099*874aeea5SJeff Kirsher 
1100*874aeea5SJeff Kirsher 	/* Pin event queue buffer */
1101*874aeea5SJeff Kirsher 	efx_init_special_buffer(efx, &channel->eventq);
1102*874aeea5SJeff Kirsher 
1103*874aeea5SJeff Kirsher 	/* Fill event queue with all ones (i.e. empty events) */
1104*874aeea5SJeff Kirsher 	memset(channel->eventq.addr, 0xff, channel->eventq.len);
1105*874aeea5SJeff Kirsher 
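	/* The EVQ_SIZE field below encodes the queue size as log2(entries);
	 * entries is a power of two, so __ffs(entries) yields exactly that.
	 */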
1106*874aeea5SJeff Kirsher 	/* Push event queue to card */
1107*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_3(reg,
1108*874aeea5SJeff Kirsher 			     FRF_AZ_EVQ_EN, 1,
1109*874aeea5SJeff Kirsher 			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1110*874aeea5SJeff Kirsher 			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1111*874aeea5SJeff Kirsher 	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1112*874aeea5SJeff Kirsher 			 channel->channel);
1113*874aeea5SJeff Kirsher 
1114*874aeea5SJeff Kirsher 	efx->type->push_irq_moderation(channel);
1115*874aeea5SJeff Kirsher }
1116*874aeea5SJeff Kirsher 
1117*874aeea5SJeff Kirsher void efx_nic_fini_eventq(struct efx_channel *channel)
1118*874aeea5SJeff Kirsher {
1119*874aeea5SJeff Kirsher 	efx_oword_t reg;
1120*874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
1121*874aeea5SJeff Kirsher 
1122*874aeea5SJeff Kirsher 	/* Remove event queue from card */
1123*874aeea5SJeff Kirsher 	EFX_ZERO_OWORD(reg);
1124*874aeea5SJeff Kirsher 	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
1125*874aeea5SJeff Kirsher 			 channel->channel);
1126*874aeea5SJeff Kirsher 	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1127*874aeea5SJeff Kirsher 		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
1128*874aeea5SJeff Kirsher 
1129*874aeea5SJeff Kirsher 	/* Unpin event queue */
1130*874aeea5SJeff Kirsher 	efx_fini_special_buffer(efx, &channel->eventq);
1131*874aeea5SJeff Kirsher }
1132*874aeea5SJeff Kirsher 
1133*874aeea5SJeff Kirsher /* Free buffers backing event queue */
1134*874aeea5SJeff Kirsher void efx_nic_remove_eventq(struct efx_channel *channel)
1135*874aeea5SJeff Kirsher {
1136*874aeea5SJeff Kirsher 	efx_free_special_buffer(channel->efx, &channel->eventq);
1137*874aeea5SJeff Kirsher }
1138*874aeea5SJeff Kirsher 
1139*874aeea5SJeff Kirsher 
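/* The TEST and FILL generators below differ only in the magic value they
 * carry; the magic lets efx_handle_generated_event() distinguish a
 * self-test completion from an RX fill request.
 */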
1140*874aeea5SJeff Kirsher void efx_nic_generate_test_event(struct efx_channel *channel)
1141*874aeea5SJeff Kirsher {
1142*874aeea5SJeff Kirsher 	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
1143*874aeea5SJeff Kirsher 	efx_qword_t test_event;
1144*874aeea5SJeff Kirsher 
1145*874aeea5SJeff Kirsher 	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1146*874aeea5SJeff Kirsher 			     FSE_AZ_EV_CODE_DRV_GEN_EV,
1147*874aeea5SJeff Kirsher 			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1148*874aeea5SJeff Kirsher 	efx_generate_event(channel, &test_event);
1149*874aeea5SJeff Kirsher }
1150*874aeea5SJeff Kirsher 
1151*874aeea5SJeff Kirsher void efx_nic_generate_fill_event(struct efx_channel *channel)
1152*874aeea5SJeff Kirsher {
1153*874aeea5SJeff Kirsher 	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
1154*874aeea5SJeff Kirsher 	efx_qword_t test_event;
1155*874aeea5SJeff Kirsher 
1156*874aeea5SJeff Kirsher 	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1157*874aeea5SJeff Kirsher 			     FSE_AZ_EV_CODE_DRV_GEN_EV,
1158*874aeea5SJeff Kirsher 			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1159*874aeea5SJeff Kirsher 	efx_generate_event(channel, &test_event);
1160*874aeea5SJeff Kirsher }
1161*874aeea5SJeff Kirsher 
1162*874aeea5SJeff Kirsher /**************************************************************************
1163*874aeea5SJeff Kirsher  *
1164*874aeea5SJeff Kirsher  * Flush handling
1165*874aeea5SJeff Kirsher  *
1166*874aeea5SJeff Kirsher  **************************************************************************/
1167*874aeea5SJeff Kirsher 
1168*874aeea5SJeff Kirsher 
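/* Scan the event queue for TX/RX flush-done events and record each
 * queue's flush state.  All other events are discarded, which is safe
 * only because the queues are about to be torn down.
 */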
1169*874aeea5SJeff Kirsher static void efx_poll_flush_events(struct efx_nic *efx)
1170*874aeea5SJeff Kirsher {
1171*874aeea5SJeff Kirsher 	struct efx_channel *channel = efx_get_channel(efx, 0);
1172*874aeea5SJeff Kirsher 	struct efx_tx_queue *tx_queue;
1173*874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue;
1174*874aeea5SJeff Kirsher 	unsigned int read_ptr = channel->eventq_read_ptr;
1175*874aeea5SJeff Kirsher 	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
1176*874aeea5SJeff Kirsher 
1177*874aeea5SJeff Kirsher 	do {
1178*874aeea5SJeff Kirsher 		efx_qword_t *event = efx_event(channel, read_ptr);
1179*874aeea5SJeff Kirsher 		int ev_code, ev_sub_code, ev_queue;
1180*874aeea5SJeff Kirsher 		bool ev_failed;
1181*874aeea5SJeff Kirsher 
1182*874aeea5SJeff Kirsher 		if (!efx_event_present(event))
1183*874aeea5SJeff Kirsher 			break;
1184*874aeea5SJeff Kirsher 
1185*874aeea5SJeff Kirsher 		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
1186*874aeea5SJeff Kirsher 		ev_sub_code = EFX_QWORD_FIELD(*event,
1187*874aeea5SJeff Kirsher 					      FSF_AZ_DRIVER_EV_SUBCODE);
1188*874aeea5SJeff Kirsher 		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1189*874aeea5SJeff Kirsher 		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1190*874aeea5SJeff Kirsher 			ev_queue = EFX_QWORD_FIELD(*event,
1191*874aeea5SJeff Kirsher 						   FSF_AZ_DRIVER_EV_SUBDATA);
1192*874aeea5SJeff Kirsher 			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1193*874aeea5SJeff Kirsher 				tx_queue = efx_get_tx_queue(
1194*874aeea5SJeff Kirsher 					efx, ev_queue / EFX_TXQ_TYPES,
1195*874aeea5SJeff Kirsher 					ev_queue % EFX_TXQ_TYPES);
1196*874aeea5SJeff Kirsher 				tx_queue->flushed = FLUSH_DONE;
1197*874aeea5SJeff Kirsher 			}
1198*874aeea5SJeff Kirsher 		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1199*874aeea5SJeff Kirsher 			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
1200*874aeea5SJeff Kirsher 			ev_queue = EFX_QWORD_FIELD(
1201*874aeea5SJeff Kirsher 				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1202*874aeea5SJeff Kirsher 			ev_failed = EFX_QWORD_FIELD(
1203*874aeea5SJeff Kirsher 				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1204*874aeea5SJeff Kirsher 			if (ev_queue < efx->n_rx_channels) {
1205*874aeea5SJeff Kirsher 				rx_queue = efx_get_rx_queue(efx, ev_queue);
1206*874aeea5SJeff Kirsher 				rx_queue->flushed =
1207*874aeea5SJeff Kirsher 					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
1208*874aeea5SJeff Kirsher 			}
1209*874aeea5SJeff Kirsher 		}
1210*874aeea5SJeff Kirsher 
1211*874aeea5SJeff Kirsher 		/* We're about to destroy the queue anyway, so
1212*874aeea5SJeff Kirsher 		 * it's ok to throw away every non-flush event */
1213*874aeea5SJeff Kirsher 		EFX_SET_QWORD(*event);
1214*874aeea5SJeff Kirsher 
1215*874aeea5SJeff Kirsher 		++read_ptr;
1216*874aeea5SJeff Kirsher 	} while (read_ptr != end_ptr);
1217*874aeea5SJeff Kirsher 
1218*874aeea5SJeff Kirsher 	channel->eventq_read_ptr = read_ptr;
1219*874aeea5SJeff Kirsher }
1220*874aeea5SJeff Kirsher 
1221*874aeea5SJeff Kirsher /* Handle tx and rx flushes at the same time, since they run in
1222*874aeea5SJeff Kirsher  * parallel in the hardware and there's no reason for us to
1223*874aeea5SJeff Kirsher  * serialise them */
1224*874aeea5SJeff Kirsher int efx_nic_flush_queues(struct efx_nic *efx)
1225*874aeea5SJeff Kirsher {
1226*874aeea5SJeff Kirsher 	struct efx_channel *channel;
1227*874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue;
1228*874aeea5SJeff Kirsher 	struct efx_tx_queue *tx_queue;
1229*874aeea5SJeff Kirsher 	int i, tx_pending, rx_pending;
1230*874aeea5SJeff Kirsher 
1231*874aeea5SJeff Kirsher 	/* If necessary prepare the hardware for flushing */
1232*874aeea5SJeff Kirsher 	efx->type->prepare_flush(efx);
1233*874aeea5SJeff Kirsher 
1234*874aeea5SJeff Kirsher 	/* Flush all tx queues in parallel */
1235*874aeea5SJeff Kirsher 	efx_for_each_channel(channel, efx) {
1236*874aeea5SJeff Kirsher 		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1237*874aeea5SJeff Kirsher 			if (tx_queue->initialised)
1238*874aeea5SJeff Kirsher 				efx_flush_tx_queue(tx_queue);
1239*874aeea5SJeff Kirsher 		}
1240*874aeea5SJeff Kirsher 	}
1241*874aeea5SJeff Kirsher 
1242*874aeea5SJeff Kirsher 	/* The hardware supports four concurrent rx flushes, each of which may
1243*874aeea5SJeff Kirsher 	 * need to be retried if there is an outstanding descriptor fetch */
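	/* Each pass below first counts the RX flushes still pending, then
	 * issues new RX flush requests (keeping at most EFX_RX_FLUSH_COUNT
	 * in flight) and counts TX queues not yet reporting completion.
	 */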
1244*874aeea5SJeff Kirsher 	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
1245*874aeea5SJeff Kirsher 		rx_pending = tx_pending = 0;
1246*874aeea5SJeff Kirsher 		efx_for_each_channel(channel, efx) {
1247*874aeea5SJeff Kirsher 			efx_for_each_channel_rx_queue(rx_queue, channel) {
1248*874aeea5SJeff Kirsher 				if (rx_queue->flushed == FLUSH_PENDING)
1249*874aeea5SJeff Kirsher 					++rx_pending;
1250*874aeea5SJeff Kirsher 			}
1251*874aeea5SJeff Kirsher 		}
1252*874aeea5SJeff Kirsher 		efx_for_each_channel(channel, efx) {
1253*874aeea5SJeff Kirsher 			efx_for_each_channel_rx_queue(rx_queue, channel) {
1254*874aeea5SJeff Kirsher 				if (rx_pending == EFX_RX_FLUSH_COUNT)
1255*874aeea5SJeff Kirsher 					break;
1256*874aeea5SJeff Kirsher 				if (rx_queue->flushed == FLUSH_FAILED ||
1257*874aeea5SJeff Kirsher 				    rx_queue->flushed == FLUSH_NONE) {
1258*874aeea5SJeff Kirsher 					efx_flush_rx_queue(rx_queue);
1259*874aeea5SJeff Kirsher 					++rx_pending;
1260*874aeea5SJeff Kirsher 				}
1261*874aeea5SJeff Kirsher 			}
1262*874aeea5SJeff Kirsher 			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1263*874aeea5SJeff Kirsher 				if (tx_queue->initialised &&
1264*874aeea5SJeff Kirsher 				    tx_queue->flushed != FLUSH_DONE)
1265*874aeea5SJeff Kirsher 					++tx_pending;
1266*874aeea5SJeff Kirsher 			}
1267*874aeea5SJeff Kirsher 		}
1268*874aeea5SJeff Kirsher 
1269*874aeea5SJeff Kirsher 		if (rx_pending == 0 && tx_pending == 0)
1270*874aeea5SJeff Kirsher 			return 0;
1271*874aeea5SJeff Kirsher 
1272*874aeea5SJeff Kirsher 		msleep(EFX_FLUSH_INTERVAL);
1273*874aeea5SJeff Kirsher 		efx_poll_flush_events(efx);
1274*874aeea5SJeff Kirsher 	}
1275*874aeea5SJeff Kirsher 
1276*874aeea5SJeff Kirsher 	/* Mark the queues as all flushed.  We're either going to return
1277*874aeea5SJeff Kirsher 	 * failure, leading to a reset, or fake up success anyway */
1278*874aeea5SJeff Kirsher 	efx_for_each_channel(channel, efx) {
1279*874aeea5SJeff Kirsher 		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
1280*874aeea5SJeff Kirsher 			if (tx_queue->initialised &&
1281*874aeea5SJeff Kirsher 			    tx_queue->flushed != FLUSH_DONE)
1282*874aeea5SJeff Kirsher 				netif_err(efx, hw, efx->net_dev,
1283*874aeea5SJeff Kirsher 					  "tx queue %d flush command timed out\n",
1284*874aeea5SJeff Kirsher 					  tx_queue->queue);
1285*874aeea5SJeff Kirsher 			tx_queue->flushed = FLUSH_DONE;
1286*874aeea5SJeff Kirsher 		}
1287*874aeea5SJeff Kirsher 		efx_for_each_channel_rx_queue(rx_queue, channel) {
1288*874aeea5SJeff Kirsher 			if (rx_queue->flushed != FLUSH_DONE)
1289*874aeea5SJeff Kirsher 				netif_err(efx, hw, efx->net_dev,
1290*874aeea5SJeff Kirsher 					  "rx queue %d flush command timed out\n",
1291*874aeea5SJeff Kirsher 					  efx_rx_queue_index(rx_queue));
1292*874aeea5SJeff Kirsher 			rx_queue->flushed = FLUSH_DONE;
1293*874aeea5SJeff Kirsher 		}
1294*874aeea5SJeff Kirsher 	}
1295*874aeea5SJeff Kirsher 
1296*874aeea5SJeff Kirsher 	return -ETIMEDOUT;
1297*874aeea5SJeff Kirsher }
1298*874aeea5SJeff Kirsher 
1299*874aeea5SJeff Kirsher /**************************************************************************
1300*874aeea5SJeff Kirsher  *
1301*874aeea5SJeff Kirsher  * Hardware interrupts
1302*874aeea5SJeff Kirsher  * The hardware interrupt handler does very little work; all the event
1303*874aeea5SJeff Kirsher  * queue processing is carried out by per-channel tasklets.
1304*874aeea5SJeff Kirsher  *
1305*874aeea5SJeff Kirsher  **************************************************************************/
1306*874aeea5SJeff Kirsher 
1307*874aeea5SJeff Kirsher /* Enable/disable/generate interrupts */
1308*874aeea5SJeff Kirsher static inline void efx_nic_interrupts(struct efx_nic *efx,
1309*874aeea5SJeff Kirsher 				      bool enabled, bool force)
1310*874aeea5SJeff Kirsher {
1311*874aeea5SJeff Kirsher 	efx_oword_t int_en_reg_ker;
1312*874aeea5SJeff Kirsher 
1313*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_3(int_en_reg_ker,
1314*874aeea5SJeff Kirsher 			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
1315*874aeea5SJeff Kirsher 			     FRF_AZ_KER_INT_KER, force,
1316*874aeea5SJeff Kirsher 			     FRF_AZ_DRV_INT_EN_KER, enabled);
1317*874aeea5SJeff Kirsher 	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1318*874aeea5SJeff Kirsher }
1319*874aeea5SJeff Kirsher 
1320*874aeea5SJeff Kirsher void efx_nic_enable_interrupts(struct efx_nic *efx)
1321*874aeea5SJeff Kirsher {
1322*874aeea5SJeff Kirsher 	struct efx_channel *channel;
1323*874aeea5SJeff Kirsher 
1324*874aeea5SJeff Kirsher 	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
1325*874aeea5SJeff Kirsher 	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
1326*874aeea5SJeff Kirsher 
1327*874aeea5SJeff Kirsher 	/* Enable interrupts */
1328*874aeea5SJeff Kirsher 	efx_nic_interrupts(efx, true, false);
1329*874aeea5SJeff Kirsher 
1330*874aeea5SJeff Kirsher 	/* Force processing of all the channels to get the EVQ RPTRs up to
1331*874aeea5SJeff Kirsher 	 * date */
1332*874aeea5SJeff Kirsher 	efx_for_each_channel(channel, efx)
1333*874aeea5SJeff Kirsher 		efx_schedule_channel(channel);
1334*874aeea5SJeff Kirsher }
1335*874aeea5SJeff Kirsher 
1336*874aeea5SJeff Kirsher void efx_nic_disable_interrupts(struct efx_nic *efx)
1337*874aeea5SJeff Kirsher {
1338*874aeea5SJeff Kirsher 	/* Disable interrupts */
1339*874aeea5SJeff Kirsher 	efx_nic_interrupts(efx, false, false);
1340*874aeea5SJeff Kirsher }
1341*874aeea5SJeff Kirsher 
1342*874aeea5SJeff Kirsher /* Generate a test interrupt
1343*874aeea5SJeff Kirsher  * Interrupts must already have been enabled, otherwise nasty things
1344*874aeea5SJeff Kirsher  * may happen.
1345*874aeea5SJeff Kirsher  */
1346*874aeea5SJeff Kirsher void efx_nic_generate_interrupt(struct efx_nic *efx)
1347*874aeea5SJeff Kirsher {
1348*874aeea5SJeff Kirsher 	efx_nic_interrupts(efx, true, true);
1349*874aeea5SJeff Kirsher }
1350*874aeea5SJeff Kirsher 
1351*874aeea5SJeff Kirsher /* Process a fatal interrupt
1352*874aeea5SJeff Kirsher  * Disable bus mastering ASAP and schedule a reset
1353*874aeea5SJeff Kirsher  */
1354*874aeea5SJeff Kirsher irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1355*874aeea5SJeff Kirsher {
1356*874aeea5SJeff Kirsher 	struct falcon_nic_data *nic_data = efx->nic_data;
1357*874aeea5SJeff Kirsher 	efx_oword_t *int_ker = efx->irq_status.addr;
1358*874aeea5SJeff Kirsher 	efx_oword_t fatal_intr;
1359*874aeea5SJeff Kirsher 	int error, mem_perr;
1360*874aeea5SJeff Kirsher 
1361*874aeea5SJeff Kirsher 	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1362*874aeea5SJeff Kirsher 	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1363*874aeea5SJeff Kirsher 
1364*874aeea5SJeff Kirsher 	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
1365*874aeea5SJeff Kirsher 		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1366*874aeea5SJeff Kirsher 		  EFX_OWORD_VAL(fatal_intr),
1367*874aeea5SJeff Kirsher 		  error ? "disabling bus mastering" : "no recognised error");
1368*874aeea5SJeff Kirsher 
1369*874aeea5SJeff Kirsher 	/* If this is a memory parity error, dump which blocks are offending */
1370*874aeea5SJeff Kirsher 	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1371*874aeea5SJeff Kirsher 		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1372*874aeea5SJeff Kirsher 	if (mem_perr) {
1373*874aeea5SJeff Kirsher 		efx_oword_t reg;
1374*874aeea5SJeff Kirsher 		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1375*874aeea5SJeff Kirsher 		netif_err(efx, hw, efx->net_dev,
1376*874aeea5SJeff Kirsher 			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
1377*874aeea5SJeff Kirsher 			  EFX_OWORD_VAL(reg));
1378*874aeea5SJeff Kirsher 	}
1379*874aeea5SJeff Kirsher 
1380*874aeea5SJeff Kirsher 	/* Disable both devices */
1381*874aeea5SJeff Kirsher 	pci_clear_master(efx->pci_dev);
1382*874aeea5SJeff Kirsher 	if (efx_nic_is_dual_func(efx))
1383*874aeea5SJeff Kirsher 		pci_clear_master(nic_data->pci_dev2);
1384*874aeea5SJeff Kirsher 	efx_nic_disable_interrupts(efx);
1385*874aeea5SJeff Kirsher 
1386*874aeea5SJeff Kirsher 	/* Count errors within a rolling EFX_INT_ERROR_EXPIRE-second window;
1386*874aeea5SJeff Kirsher 	 * schedule a reset, or disable the NIC once EFX_MAX_INT_ERRORS is
1386*874aeea5SJeff Kirsher 	 * reached within one window */
1387*874aeea5SJeff Kirsher 	if (efx->int_error_count == 0 ||
1388*874aeea5SJeff Kirsher 	    time_after(jiffies, efx->int_error_expire)) {
1389*874aeea5SJeff Kirsher 		efx->int_error_count = 0;
1390*874aeea5SJeff Kirsher 		efx->int_error_expire =
1391*874aeea5SJeff Kirsher 			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
1392*874aeea5SJeff Kirsher 	}
1393*874aeea5SJeff Kirsher 	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
1394*874aeea5SJeff Kirsher 		netif_err(efx, hw, efx->net_dev,
1395*874aeea5SJeff Kirsher 			  "SYSTEM ERROR - reset scheduled\n");
1396*874aeea5SJeff Kirsher 		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1397*874aeea5SJeff Kirsher 	} else {
1398*874aeea5SJeff Kirsher 		netif_err(efx, hw, efx->net_dev,
1399*874aeea5SJeff Kirsher 			  "SYSTEM ERROR - max number of errors seen. "
1400*874aeea5SJeff Kirsher 			  "NIC will be disabled\n");
1401*874aeea5SJeff Kirsher 		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1402*874aeea5SJeff Kirsher 	}
1403*874aeea5SJeff Kirsher 
1404*874aeea5SJeff Kirsher 	return IRQ_HANDLED;
1405*874aeea5SJeff Kirsher }
1406*874aeea5SJeff Kirsher 
1407*874aeea5SJeff Kirsher /* Handle a legacy interrupt
1408*874aeea5SJeff Kirsher  * Acknowledges the interrupt and schedules event queue processing.
1409*874aeea5SJeff Kirsher  */
1410*874aeea5SJeff Kirsher static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1411*874aeea5SJeff Kirsher {
1412*874aeea5SJeff Kirsher 	struct efx_nic *efx = dev_id;
1413*874aeea5SJeff Kirsher 	efx_oword_t *int_ker = efx->irq_status.addr;
1414*874aeea5SJeff Kirsher 	irqreturn_t result = IRQ_NONE;
1415*874aeea5SJeff Kirsher 	struct efx_channel *channel;
1416*874aeea5SJeff Kirsher 	efx_dword_t reg;
1417*874aeea5SJeff Kirsher 	u32 queues;
1418*874aeea5SJeff Kirsher 	int syserr;
1419*874aeea5SJeff Kirsher 
1420*874aeea5SJeff Kirsher 	/* Could this be ours?  If interrupts are disabled then the
1421*874aeea5SJeff Kirsher 	 * channel state may not be valid.
1422*874aeea5SJeff Kirsher 	 */
1423*874aeea5SJeff Kirsher 	if (!efx->legacy_irq_enabled)
1424*874aeea5SJeff Kirsher 		return result;
1425*874aeea5SJeff Kirsher 
1426*874aeea5SJeff Kirsher 	/* Read the ISR which also ACKs the interrupts */
1427*874aeea5SJeff Kirsher 	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1428*874aeea5SJeff Kirsher 	queues = EFX_EXTRACT_DWORD(reg, 0, 31);
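	/* Each ISR bit corresponds to the event queue of the channel with
	 * the same number; bit efx->fatal_irq_level instead flags a fatal
	 * error condition.
	 */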
1429*874aeea5SJeff Kirsher 
1430*874aeea5SJeff Kirsher 	/* Check to see if we have a serious error condition */
1431*874aeea5SJeff Kirsher 	if (queues & (1U << efx->fatal_irq_level)) {
1432*874aeea5SJeff Kirsher 		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1433*874aeea5SJeff Kirsher 		if (unlikely(syserr))
1434*874aeea5SJeff Kirsher 			return efx_nic_fatal_interrupt(efx);
1435*874aeea5SJeff Kirsher 	}
1436*874aeea5SJeff Kirsher 
1437*874aeea5SJeff Kirsher 	if (queues != 0) {
1438*874aeea5SJeff Kirsher 		if (EFX_WORKAROUND_15783(efx))
1439*874aeea5SJeff Kirsher 			efx->irq_zero_count = 0;
1440*874aeea5SJeff Kirsher 
1441*874aeea5SJeff Kirsher 		/* Schedule processing of any interrupting queues */
1442*874aeea5SJeff Kirsher 		efx_for_each_channel(channel, efx) {
1443*874aeea5SJeff Kirsher 			if (queues & 1)
1444*874aeea5SJeff Kirsher 				efx_schedule_channel(channel);
1445*874aeea5SJeff Kirsher 			queues >>= 1;
1446*874aeea5SJeff Kirsher 		}
1447*874aeea5SJeff Kirsher 		result = IRQ_HANDLED;
1448*874aeea5SJeff Kirsher 
1449*874aeea5SJeff Kirsher 	} else if (EFX_WORKAROUND_15783(efx)) {
1450*874aeea5SJeff Kirsher 		efx_qword_t *event;
1451*874aeea5SJeff Kirsher 
1452*874aeea5SJeff Kirsher 		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
1453*874aeea5SJeff Kirsher 		 * because this might be a shared interrupt. */
1454*874aeea5SJeff Kirsher 		if (efx->irq_zero_count++ == 0)
1455*874aeea5SJeff Kirsher 			result = IRQ_HANDLED;
1456*874aeea5SJeff Kirsher 
1457*874aeea5SJeff Kirsher 		/* Ensure we schedule or rearm all event queues */
1458*874aeea5SJeff Kirsher 		efx_for_each_channel(channel, efx) {
1459*874aeea5SJeff Kirsher 			event = efx_event(channel, channel->eventq_read_ptr);
1460*874aeea5SJeff Kirsher 			if (efx_event_present(event))
1461*874aeea5SJeff Kirsher 				efx_schedule_channel(channel);
1462*874aeea5SJeff Kirsher 			else
1463*874aeea5SJeff Kirsher 				efx_nic_eventq_read_ack(channel);
1464*874aeea5SJeff Kirsher 		}
1465*874aeea5SJeff Kirsher 	}
1466*874aeea5SJeff Kirsher 
1467*874aeea5SJeff Kirsher 	if (result == IRQ_HANDLED) {
1468*874aeea5SJeff Kirsher 		efx->last_irq_cpu = raw_smp_processor_id();
1469*874aeea5SJeff Kirsher 		netif_vdbg(efx, intr, efx->net_dev,
1470*874aeea5SJeff Kirsher 			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1471*874aeea5SJeff Kirsher 			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1472*874aeea5SJeff Kirsher 	}
1473*874aeea5SJeff Kirsher 
1474*874aeea5SJeff Kirsher 	return result;
1475*874aeea5SJeff Kirsher }
1476*874aeea5SJeff Kirsher 
1477*874aeea5SJeff Kirsher /* Handle an MSI interrupt
1478*874aeea5SJeff Kirsher  *
1479*874aeea5SJeff Kirsher  * Handle an MSI hardware interrupt.  This routine schedules event
1480*874aeea5SJeff Kirsher  * queue processing.  No interrupt acknowledgement cycle is necessary.
1481*874aeea5SJeff Kirsher  * Also, we never need to check that the interrupt is for us, since
1482*874aeea5SJeff Kirsher  * MSI interrupts cannot be shared.
1483*874aeea5SJeff Kirsher  */
1484*874aeea5SJeff Kirsher static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1485*874aeea5SJeff Kirsher {
1486*874aeea5SJeff Kirsher 	struct efx_channel *channel = *(struct efx_channel **)dev_id;
1487*874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
1488*874aeea5SJeff Kirsher 	efx_oword_t *int_ker = efx->irq_status.addr;
1489*874aeea5SJeff Kirsher 	int syserr;
1490*874aeea5SJeff Kirsher 
1491*874aeea5SJeff Kirsher 	efx->last_irq_cpu = raw_smp_processor_id();
1492*874aeea5SJeff Kirsher 	netif_vdbg(efx, intr, efx->net_dev,
1493*874aeea5SJeff Kirsher 		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
1494*874aeea5SJeff Kirsher 		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1495*874aeea5SJeff Kirsher 
1496*874aeea5SJeff Kirsher 	/* Check to see if we have a serious error condition */
1497*874aeea5SJeff Kirsher 	if (channel->channel == efx->fatal_irq_level) {
1498*874aeea5SJeff Kirsher 		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1499*874aeea5SJeff Kirsher 		if (unlikely(syserr))
1500*874aeea5SJeff Kirsher 			return efx_nic_fatal_interrupt(efx);
1501*874aeea5SJeff Kirsher 	}
1502*874aeea5SJeff Kirsher 
1503*874aeea5SJeff Kirsher 	/* Schedule processing of the channel */
1504*874aeea5SJeff Kirsher 	efx_schedule_channel(channel);
1505*874aeea5SJeff Kirsher 
1506*874aeea5SJeff Kirsher 	return IRQ_HANDLED;
1507*874aeea5SJeff Kirsher }
1508*874aeea5SJeff Kirsher 
1509*874aeea5SJeff Kirsher 
1510*874aeea5SJeff Kirsher /* Set up the RSS indirection table.
1511*874aeea5SJeff Kirsher  * This maps the hash value of each packet to an RX queue.
1512*874aeea5SJeff Kirsher  */
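/* The table contents (efx->rx_indir_table) are initialised elsewhere in
 * the driver, typically as a round-robin spread over the RX channels;
 * this function only pushes the current contents to the hardware.
 */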
1513*874aeea5SJeff Kirsher void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1514*874aeea5SJeff Kirsher {
1515*874aeea5SJeff Kirsher 	size_t i = 0;
1516*874aeea5SJeff Kirsher 	efx_dword_t dword;
1517*874aeea5SJeff Kirsher 
1518*874aeea5SJeff Kirsher 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1519*874aeea5SJeff Kirsher 		return;
1520*874aeea5SJeff Kirsher 
1521*874aeea5SJeff Kirsher 	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1522*874aeea5SJeff Kirsher 		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
1523*874aeea5SJeff Kirsher 
1524*874aeea5SJeff Kirsher 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1525*874aeea5SJeff Kirsher 		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1526*874aeea5SJeff Kirsher 				     efx->rx_indir_table[i]);
1527*874aeea5SJeff Kirsher 		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
1528*874aeea5SJeff Kirsher 	}
1529*874aeea5SJeff Kirsher }
1530*874aeea5SJeff Kirsher 
1531*874aeea5SJeff Kirsher /* Hook interrupt handler(s)
1532*874aeea5SJeff Kirsher  * Try MSI and then legacy interrupts.
1533*874aeea5SJeff Kirsher  */
1534*874aeea5SJeff Kirsher int efx_nic_init_interrupt(struct efx_nic *efx)
1535*874aeea5SJeff Kirsher {
1536*874aeea5SJeff Kirsher 	struct efx_channel *channel;
1537*874aeea5SJeff Kirsher 	int rc;
1538*874aeea5SJeff Kirsher 
1539*874aeea5SJeff Kirsher 	if (!EFX_INT_MODE_USE_MSI(efx)) {
1540*874aeea5SJeff Kirsher 		irq_handler_t handler;
1541*874aeea5SJeff Kirsher 		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1542*874aeea5SJeff Kirsher 			handler = efx_legacy_interrupt;
1543*874aeea5SJeff Kirsher 		else
1544*874aeea5SJeff Kirsher 			handler = falcon_legacy_interrupt_a1;
1545*874aeea5SJeff Kirsher 
1546*874aeea5SJeff Kirsher 		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1547*874aeea5SJeff Kirsher 				 efx->name, efx);
1548*874aeea5SJeff Kirsher 		if (rc) {
1549*874aeea5SJeff Kirsher 			netif_err(efx, drv, efx->net_dev,
1550*874aeea5SJeff Kirsher 				  "failed to hook legacy IRQ %d\n",
1551*874aeea5SJeff Kirsher 				  efx->pci_dev->irq);
1552*874aeea5SJeff Kirsher 			goto fail1;
1553*874aeea5SJeff Kirsher 		}
1554*874aeea5SJeff Kirsher 		return 0;
1555*874aeea5SJeff Kirsher 	}
1556*874aeea5SJeff Kirsher 
1557*874aeea5SJeff Kirsher 	/* Hook MSI or MSI-X interrupt */
1558*874aeea5SJeff Kirsher 	efx_for_each_channel(channel, efx) {
1559*874aeea5SJeff Kirsher 		rc = request_irq(channel->irq, efx_msi_interrupt,
1560*874aeea5SJeff Kirsher 				 IRQF_PROBE_SHARED, /* Not shared */
1561*874aeea5SJeff Kirsher 				 efx->channel_name[channel->channel],
1562*874aeea5SJeff Kirsher 				 &efx->channel[channel->channel]);
1563*874aeea5SJeff Kirsher 		if (rc) {
1564*874aeea5SJeff Kirsher 			netif_err(efx, drv, efx->net_dev,
1565*874aeea5SJeff Kirsher 				  "failed to hook IRQ %d\n", channel->irq);
1566*874aeea5SJeff Kirsher 			goto fail2;
1567*874aeea5SJeff Kirsher 		}
1568*874aeea5SJeff Kirsher 	}
1569*874aeea5SJeff Kirsher 
1570*874aeea5SJeff Kirsher 	return 0;
1571*874aeea5SJeff Kirsher 
1572*874aeea5SJeff Kirsher  fail2:
1573*874aeea5SJeff Kirsher 	efx_for_each_channel(channel, efx)
1574*874aeea5SJeff Kirsher 		free_irq(channel->irq, &efx->channel[channel->channel]);
1575*874aeea5SJeff Kirsher  fail1:
1576*874aeea5SJeff Kirsher 	return rc;
1577*874aeea5SJeff Kirsher }
1578*874aeea5SJeff Kirsher 
1579*874aeea5SJeff Kirsher void efx_nic_fini_interrupt(struct efx_nic *efx)
1580*874aeea5SJeff Kirsher {
1581*874aeea5SJeff Kirsher 	struct efx_channel *channel;
1582*874aeea5SJeff Kirsher 	efx_oword_t reg;
1583*874aeea5SJeff Kirsher 
1584*874aeea5SJeff Kirsher 	/* Disable MSI/MSI-X interrupts */
1585*874aeea5SJeff Kirsher 	efx_for_each_channel(channel, efx) {
1586*874aeea5SJeff Kirsher 		if (channel->irq)
1587*874aeea5SJeff Kirsher 			free_irq(channel->irq, &efx->channel[channel->channel]);
1588*874aeea5SJeff Kirsher 	}
1589*874aeea5SJeff Kirsher 
1590*874aeea5SJeff Kirsher 	/* ACK legacy interrupt */
1591*874aeea5SJeff Kirsher 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1592*874aeea5SJeff Kirsher 		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1593*874aeea5SJeff Kirsher 	else
1594*874aeea5SJeff Kirsher 		falcon_irq_ack_a1(efx);
1595*874aeea5SJeff Kirsher 
1596*874aeea5SJeff Kirsher 	/* Disable legacy interrupt */
1597*874aeea5SJeff Kirsher 	if (efx->legacy_irq)
1598*874aeea5SJeff Kirsher 		free_irq(efx->legacy_irq, efx);
1599*874aeea5SJeff Kirsher }
1600*874aeea5SJeff Kirsher 
1601*874aeea5SJeff Kirsher u32 efx_nic_fpga_ver(struct efx_nic *efx)
1602*874aeea5SJeff Kirsher {
1603*874aeea5SJeff Kirsher 	efx_oword_t altera_build;
1604*874aeea5SJeff Kirsher 	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1605*874aeea5SJeff Kirsher 	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1606*874aeea5SJeff Kirsher }
1607*874aeea5SJeff Kirsher 
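/* One-time initialisation of registers common to all NIC revisions:
 * descriptor cache placement and sizing, the interrupt status address,
 * the fatal interrupt mask, and TX arbitration and pacing defaults.
 */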
1608*874aeea5SJeff Kirsher void efx_nic_init_common(struct efx_nic *efx)
1609*874aeea5SJeff Kirsher {
1610*874aeea5SJeff Kirsher 	efx_oword_t temp;
1611*874aeea5SJeff Kirsher 
1612*874aeea5SJeff Kirsher 	/* Set positions of descriptor caches in SRAM. */
1613*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
1614*874aeea5SJeff Kirsher 			     efx->type->tx_dc_base / 8);
1615*874aeea5SJeff Kirsher 	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1616*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
1617*874aeea5SJeff Kirsher 			     efx->type->rx_dc_base / 8);
1618*874aeea5SJeff Kirsher 	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1619*874aeea5SJeff Kirsher 
1620*874aeea5SJeff Kirsher 	/* Set TX descriptor cache size. */
1621*874aeea5SJeff Kirsher 	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1622*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1623*874aeea5SJeff Kirsher 	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1624*874aeea5SJeff Kirsher 
1625*874aeea5SJeff Kirsher 	/* Set RX descriptor cache size.  Set low watermark to size-8, as
1626*874aeea5SJeff Kirsher 	 * this allows most efficient prefetching.
1627*874aeea5SJeff Kirsher 	 */
1628*874aeea5SJeff Kirsher 	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1629*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1630*874aeea5SJeff Kirsher 	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1631*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1632*874aeea5SJeff Kirsher 	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1633*874aeea5SJeff Kirsher 
1634*874aeea5SJeff Kirsher 	/* Program INT_KER address */
1635*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_2(temp,
1636*874aeea5SJeff Kirsher 			     FRF_AZ_NORM_INT_VEC_DIS_KER,
1637*874aeea5SJeff Kirsher 			     EFX_INT_MODE_USE_MSI(efx),
1638*874aeea5SJeff Kirsher 			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1639*874aeea5SJeff Kirsher 	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1640*874aeea5SJeff Kirsher 
1641*874aeea5SJeff Kirsher 	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1642*874aeea5SJeff Kirsher 		/* Use an interrupt level unused by event queues */
1643*874aeea5SJeff Kirsher 		efx->fatal_irq_level = 0x1f;
1644*874aeea5SJeff Kirsher 	else
1645*874aeea5SJeff Kirsher 		/* Use a valid MSI-X vector */
1646*874aeea5SJeff Kirsher 		efx->fatal_irq_level = 0;
1647*874aeea5SJeff Kirsher 
1648*874aeea5SJeff Kirsher 	/* Enable all the genuinely fatal interrupts.  (They are still
1649*874aeea5SJeff Kirsher 	 * masked by the overall interrupt mask, controlled by
1650*874aeea5SJeff Kirsher 	 * efx_nic_interrupts()).
1651*874aeea5SJeff Kirsher 	 *
1652*874aeea5SJeff Kirsher 	 * Note: All other fatal interrupts are enabled
1653*874aeea5SJeff Kirsher 	 */
1654*874aeea5SJeff Kirsher 	EFX_POPULATE_OWORD_3(temp,
1655*874aeea5SJeff Kirsher 			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1656*874aeea5SJeff Kirsher 			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1657*874aeea5SJeff Kirsher 			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1658*874aeea5SJeff Kirsher 	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1659*874aeea5SJeff Kirsher 		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1660*874aeea5SJeff Kirsher 	EFX_INVERT_OWORD(temp);
1661*874aeea5SJeff Kirsher 	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1662*874aeea5SJeff Kirsher 
1663*874aeea5SJeff Kirsher 	efx_nic_push_rx_indir_table(efx);
1664*874aeea5SJeff Kirsher 
1665*874aeea5SJeff Kirsher 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1666*874aeea5SJeff Kirsher 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1667*874aeea5SJeff Kirsher 	 */
1668*874aeea5SJeff Kirsher 	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1669*874aeea5SJeff Kirsher 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1670*874aeea5SJeff Kirsher 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1671*874aeea5SJeff Kirsher 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1672*874aeea5SJeff Kirsher 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1673*874aeea5SJeff Kirsher 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1674*874aeea5SJeff Kirsher 	/* Enable SW_EV to inherit in char driver - assume harmless here */
1675*874aeea5SJeff Kirsher 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1676*874aeea5SJeff Kirsher 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
1677*874aeea5SJeff Kirsher 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1678*874aeea5SJeff Kirsher 	/* Disable hardware watchdog which can misfire */
1679*874aeea5SJeff Kirsher 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1680*874aeea5SJeff Kirsher 	/* Squash TX of packets of 16 bytes or less */
1681*874aeea5SJeff Kirsher 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1682*874aeea5SJeff Kirsher 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1683*874aeea5SJeff Kirsher 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1684*874aeea5SJeff Kirsher 
1685*874aeea5SJeff Kirsher 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1686*874aeea5SJeff Kirsher 		EFX_POPULATE_OWORD_4(temp,
1687*874aeea5SJeff Kirsher 				     /* Default values */
1688*874aeea5SJeff Kirsher 				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1689*874aeea5SJeff Kirsher 				     FRF_BZ_TX_PACE_SB_AF, 0xb,
1690*874aeea5SJeff Kirsher 				     FRF_BZ_TX_PACE_FB_BASE, 0,
1691*874aeea5SJeff Kirsher 				     /* Allow large pace values in the
1692*874aeea5SJeff Kirsher 				      * fast bin. */
1693*874aeea5SJeff Kirsher 				     FRF_BZ_TX_PACE_BIN_TH,
1694*874aeea5SJeff Kirsher 				     FFE_BZ_TX_PACE_RESERVED);
1695*874aeea5SJeff Kirsher 		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1696*874aeea5SJeff Kirsher 	}
1697*874aeea5SJeff Kirsher }
1698*874aeea5SJeff Kirsher 
1699*874aeea5SJeff Kirsher /* Register dump */
1700*874aeea5SJeff Kirsher 
1701*874aeea5SJeff Kirsher #define REGISTER_REVISION_A	1
1702*874aeea5SJeff Kirsher #define REGISTER_REVISION_B	2
1703*874aeea5SJeff Kirsher #define REGISTER_REVISION_C	3
1704*874aeea5SJeff Kirsher #define REGISTER_REVISION_Z	3	/* latest revision */
1705*874aeea5SJeff Kirsher 
1706*874aeea5SJeff Kirsher struct efx_nic_reg {
1707*874aeea5SJeff Kirsher 	u32 offset:24;
1708*874aeea5SJeff Kirsher 	u32 min_revision:2, max_revision:2;
1709*874aeea5SJeff Kirsher };
1710*874aeea5SJeff Kirsher 
1711*874aeea5SJeff Kirsher #define REGISTER(name, min_rev, max_rev) {				\
1712*874aeea5SJeff Kirsher 	FR_ ## min_rev ## max_rev ## _ ## name,				\
1713*874aeea5SJeff Kirsher 	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
1714*874aeea5SJeff Kirsher }
1715*874aeea5SJeff Kirsher #define REGISTER_AA(name) REGISTER(name, A, A)
1716*874aeea5SJeff Kirsher #define REGISTER_AB(name) REGISTER(name, A, B)
1717*874aeea5SJeff Kirsher #define REGISTER_AZ(name) REGISTER(name, A, Z)
1718*874aeea5SJeff Kirsher #define REGISTER_BB(name) REGISTER(name, B, B)
1719*874aeea5SJeff Kirsher #define REGISTER_BZ(name) REGISTER(name, B, Z)
1720*874aeea5SJeff Kirsher #define REGISTER_CZ(name) REGISTER(name, C, Z)
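/* For example, REGISTER_AB(GPIO_CTL) expands to
 * { FR_AB_GPIO_CTL, REGISTER_REVISION_A, REGISTER_REVISION_B },
 * marking the register as present on revisions A and B only.
 */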
1721*874aeea5SJeff Kirsher 
1722*874aeea5SJeff Kirsher static const struct efx_nic_reg efx_nic_regs[] = {
1723*874aeea5SJeff Kirsher 	REGISTER_AZ(ADR_REGION),
1724*874aeea5SJeff Kirsher 	REGISTER_AZ(INT_EN_KER),
1725*874aeea5SJeff Kirsher 	REGISTER_BZ(INT_EN_CHAR),
1726*874aeea5SJeff Kirsher 	REGISTER_AZ(INT_ADR_KER),
1727*874aeea5SJeff Kirsher 	REGISTER_BZ(INT_ADR_CHAR),
1728*874aeea5SJeff Kirsher 	/* INT_ACK_KER is WO */
1729*874aeea5SJeff Kirsher 	/* INT_ISR0 is RC */
1730*874aeea5SJeff Kirsher 	REGISTER_AZ(HW_INIT),
1731*874aeea5SJeff Kirsher 	REGISTER_CZ(USR_EV_CFG),
1732*874aeea5SJeff Kirsher 	REGISTER_AB(EE_SPI_HCMD),
1733*874aeea5SJeff Kirsher 	REGISTER_AB(EE_SPI_HADR),
1734*874aeea5SJeff Kirsher 	REGISTER_AB(EE_SPI_HDATA),
1735*874aeea5SJeff Kirsher 	REGISTER_AB(EE_BASE_PAGE),
1736*874aeea5SJeff Kirsher 	REGISTER_AB(EE_VPD_CFG0),
1737*874aeea5SJeff Kirsher 	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1738*874aeea5SJeff Kirsher 	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
1739*874aeea5SJeff Kirsher 	/* PCIE_CORE_INDIRECT is indirect */
1740*874aeea5SJeff Kirsher 	REGISTER_AB(NIC_STAT),
1741*874aeea5SJeff Kirsher 	REGISTER_AB(GPIO_CTL),
1742*874aeea5SJeff Kirsher 	REGISTER_AB(GLB_CTL),
1743*874aeea5SJeff Kirsher 	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1744*874aeea5SJeff Kirsher 	REGISTER_BZ(DP_CTRL),
1745*874aeea5SJeff Kirsher 	REGISTER_AZ(MEM_STAT),
1746*874aeea5SJeff Kirsher 	REGISTER_AZ(CS_DEBUG),
1747*874aeea5SJeff Kirsher 	REGISTER_AZ(ALTERA_BUILD),
1748*874aeea5SJeff Kirsher 	REGISTER_AZ(CSR_SPARE),
1749*874aeea5SJeff Kirsher 	REGISTER_AB(PCIE_SD_CTL0123),
1750*874aeea5SJeff Kirsher 	REGISTER_AB(PCIE_SD_CTL45),
1751*874aeea5SJeff Kirsher 	REGISTER_AB(PCIE_PCS_CTL_STAT),
1752*874aeea5SJeff Kirsher 	/* DEBUG_DATA_OUT is not used */
1753*874aeea5SJeff Kirsher 	/* DRV_EV is WO */
1754*874aeea5SJeff Kirsher 	REGISTER_AZ(EVQ_CTL),
1755*874aeea5SJeff Kirsher 	REGISTER_AZ(EVQ_CNT1),
1756*874aeea5SJeff Kirsher 	REGISTER_AZ(EVQ_CNT2),
1757*874aeea5SJeff Kirsher 	REGISTER_AZ(BUF_TBL_CFG),
1758*874aeea5SJeff Kirsher 	REGISTER_AZ(SRM_RX_DC_CFG),
1759*874aeea5SJeff Kirsher 	REGISTER_AZ(SRM_TX_DC_CFG),
1760*874aeea5SJeff Kirsher 	REGISTER_AZ(SRM_CFG),
1761*874aeea5SJeff Kirsher 	/* BUF_TBL_UPD is WO */
1762*874aeea5SJeff Kirsher 	REGISTER_AZ(SRM_UPD_EVQ),
1763*874aeea5SJeff Kirsher 	REGISTER_AZ(SRAM_PARITY),
1764*874aeea5SJeff Kirsher 	REGISTER_AZ(RX_CFG),
1765*874aeea5SJeff Kirsher 	REGISTER_BZ(RX_FILTER_CTL),
1766*874aeea5SJeff Kirsher 	/* RX_FLUSH_DESCQ is WO */
1767*874aeea5SJeff Kirsher 	REGISTER_AZ(RX_DC_CFG),
1768*874aeea5SJeff Kirsher 	REGISTER_AZ(RX_DC_PF_WM),
1769*874aeea5SJeff Kirsher 	REGISTER_BZ(RX_RSS_TKEY),
1770*874aeea5SJeff Kirsher 	/* RX_NODESC_DROP is RC */
1771*874aeea5SJeff Kirsher 	REGISTER_AA(RX_SELF_RST),
1772*874aeea5SJeff Kirsher 	/* RX_DEBUG, RX_PUSH_DROP are not used */
1773*874aeea5SJeff Kirsher 	REGISTER_CZ(RX_RSS_IPV6_REG1),
1774*874aeea5SJeff Kirsher 	REGISTER_CZ(RX_RSS_IPV6_REG2),
1775*874aeea5SJeff Kirsher 	REGISTER_CZ(RX_RSS_IPV6_REG3),
1776*874aeea5SJeff Kirsher 	/* TX_FLUSH_DESCQ is WO */
1777*874aeea5SJeff Kirsher 	REGISTER_AZ(TX_DC_CFG),
1778*874aeea5SJeff Kirsher 	REGISTER_AA(TX_CHKSM_CFG),
1779*874aeea5SJeff Kirsher 	REGISTER_AZ(TX_CFG),
1780*874aeea5SJeff Kirsher 	/* TX_PUSH_DROP is not used */
1781*874aeea5SJeff Kirsher 	REGISTER_AZ(TX_RESERVED),
1782*874aeea5SJeff Kirsher 	REGISTER_BZ(TX_PACE),
1783*874aeea5SJeff Kirsher 	/* TX_PACE_DROP_QID is RC */
1784*874aeea5SJeff Kirsher 	REGISTER_BB(TX_VLAN),
1785*874aeea5SJeff Kirsher 	REGISTER_BZ(TX_IPFIL_PORTEN),
1786*874aeea5SJeff Kirsher 	REGISTER_AB(MD_TXD),
1787*874aeea5SJeff Kirsher 	REGISTER_AB(MD_RXD),
1788*874aeea5SJeff Kirsher 	REGISTER_AB(MD_CS),
1789*874aeea5SJeff Kirsher 	REGISTER_AB(MD_PHY_ADR),
1790*874aeea5SJeff Kirsher 	REGISTER_AB(MD_ID),
1791*874aeea5SJeff Kirsher 	/* MD_STAT is RC */
1792*874aeea5SJeff Kirsher 	REGISTER_AB(MAC_STAT_DMA),
1793*874aeea5SJeff Kirsher 	REGISTER_AB(MAC_CTRL),
1794*874aeea5SJeff Kirsher 	REGISTER_BB(GEN_MODE),
1795*874aeea5SJeff Kirsher 	REGISTER_AB(MAC_MC_HASH_REG0),
1796*874aeea5SJeff Kirsher 	REGISTER_AB(MAC_MC_HASH_REG1),
1797*874aeea5SJeff Kirsher 	REGISTER_AB(GM_CFG1),
1798*874aeea5SJeff Kirsher 	REGISTER_AB(GM_CFG2),
1799*874aeea5SJeff Kirsher 	/* GM_IPG and GM_HD are not used */
1800*874aeea5SJeff Kirsher 	REGISTER_AB(GM_MAX_FLEN),
1801*874aeea5SJeff Kirsher 	/* GM_TEST is not used */
1802*874aeea5SJeff Kirsher 	REGISTER_AB(GM_ADR1),
1803*874aeea5SJeff Kirsher 	REGISTER_AB(GM_ADR2),
1804*874aeea5SJeff Kirsher 	REGISTER_AB(GMF_CFG0),
1805*874aeea5SJeff Kirsher 	REGISTER_AB(GMF_CFG1),
1806*874aeea5SJeff Kirsher 	REGISTER_AB(GMF_CFG2),
1807*874aeea5SJeff Kirsher 	REGISTER_AB(GMF_CFG3),
1808*874aeea5SJeff Kirsher 	REGISTER_AB(GMF_CFG4),
1809*874aeea5SJeff Kirsher 	REGISTER_AB(GMF_CFG5),
1810*874aeea5SJeff Kirsher 	REGISTER_BB(TX_SRC_MAC_CTL),
1811*874aeea5SJeff Kirsher 	REGISTER_AB(XM_ADR_LO),
1812*874aeea5SJeff Kirsher 	REGISTER_AB(XM_ADR_HI),
1813*874aeea5SJeff Kirsher 	REGISTER_AB(XM_GLB_CFG),
1814*874aeea5SJeff Kirsher 	REGISTER_AB(XM_TX_CFG),
1815*874aeea5SJeff Kirsher 	REGISTER_AB(XM_RX_CFG),
1816*874aeea5SJeff Kirsher 	REGISTER_AB(XM_MGT_INT_MASK),
1817*874aeea5SJeff Kirsher 	REGISTER_AB(XM_FC),
1818*874aeea5SJeff Kirsher 	REGISTER_AB(XM_PAUSE_TIME),
1819*874aeea5SJeff Kirsher 	REGISTER_AB(XM_TX_PARAM),
1820*874aeea5SJeff Kirsher 	REGISTER_AB(XM_RX_PARAM),
1821*874aeea5SJeff Kirsher 	/* XM_MGT_INT_MSK (note no 'A') is RC */
1822*874aeea5SJeff Kirsher 	REGISTER_AB(XX_PWR_RST),
1823*874aeea5SJeff Kirsher 	REGISTER_AB(XX_SD_CTL),
1824*874aeea5SJeff Kirsher 	REGISTER_AB(XX_TXDRV_CTL),
1825*874aeea5SJeff Kirsher 	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1826*874aeea5SJeff Kirsher 	/* XX_CORE_STAT is partly RC */
1827*874aeea5SJeff Kirsher };
1828*874aeea5SJeff Kirsher 
1829*874aeea5SJeff Kirsher struct efx_nic_reg_table {
1830*874aeea5SJeff Kirsher 	u32 offset:24;
1831*874aeea5SJeff Kirsher 	u32 min_revision:2, max_revision:2;
1832*874aeea5SJeff Kirsher 	u32 step:6, rows:21;
1833*874aeea5SJeff Kirsher };
1834*874aeea5SJeff Kirsher 
1835*874aeea5SJeff Kirsher #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1836*874aeea5SJeff Kirsher 	offset,								\
1837*874aeea5SJeff Kirsher 	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
1838*874aeea5SJeff Kirsher 	step, rows							\
1839*874aeea5SJeff Kirsher }
1840*874aeea5SJeff Kirsher #define REGISTER_TABLE(name, min_rev, max_rev) 				\
1841*874aeea5SJeff Kirsher 	REGISTER_TABLE_DIMENSIONS(					\
1842*874aeea5SJeff Kirsher 		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
1843*874aeea5SJeff Kirsher 		min_rev, max_rev,					\
1844*874aeea5SJeff Kirsher 		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
1845*874aeea5SJeff Kirsher 		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1846*874aeea5SJeff Kirsher #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1847*874aeea5SJeff Kirsher #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1848*874aeea5SJeff Kirsher #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1849*874aeea5SJeff Kirsher #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1850*874aeea5SJeff Kirsher #define REGISTER_TABLE_BB_CZ(name)					\
1851*874aeea5SJeff Kirsher 	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
1852*874aeea5SJeff Kirsher 				  FR_BZ_ ## name ## _STEP,		\
1853*874aeea5SJeff Kirsher 				  FR_BB_ ## name ## _ROWS),		\
1854*874aeea5SJeff Kirsher 	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
1855*874aeea5SJeff Kirsher 				  FR_BZ_ ## name ## _STEP,		\
1856*874aeea5SJeff Kirsher 				  FR_CZ_ ## name ## _ROWS)
1857*874aeea5SJeff Kirsher #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
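/* Note that REGISTER_TABLE_BB_CZ() expands to two initialisers: one for
 * revision B using the FR_BB_ row count and one for revisions C..Z using
 * the FR_CZ_ row count, both at the shared FR_BZ_ offset.
 */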
1858*874aeea5SJeff Kirsher 
1859*874aeea5SJeff Kirsher static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1860*874aeea5SJeff Kirsher 	/* DRIVER is not used */
1861*874aeea5SJeff Kirsher 	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1862*874aeea5SJeff Kirsher 	REGISTER_TABLE_BB(TX_IPFIL_TBL),
1863*874aeea5SJeff Kirsher 	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
1864*874aeea5SJeff Kirsher 	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
1865*874aeea5SJeff Kirsher 	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
1866*874aeea5SJeff Kirsher 	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
1867*874aeea5SJeff Kirsher 	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1868*874aeea5SJeff Kirsher 	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1869*874aeea5SJeff Kirsher 	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1870*874aeea5SJeff Kirsher 	/* We can't reasonably read all of the buffer table (up to 8MB!).
1871*874aeea5SJeff Kirsher 	 * However this driver will only use a few entries.  Reading
1872*874aeea5SJeff Kirsher 	 * 1K entries allows for some expansion of queue count and
1873*874aeea5SJeff Kirsher 	 * size before we need to change the version. */
1874*874aeea5SJeff Kirsher 	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
1875*874aeea5SJeff Kirsher 				  A, A, 8, 1024),
1876*874aeea5SJeff Kirsher 	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1877*874aeea5SJeff Kirsher 				  B, Z, 8, 1024),
1878*874aeea5SJeff Kirsher 	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1879*874aeea5SJeff Kirsher 	REGISTER_TABLE_BB_CZ(TIMER_TBL),
1880*874aeea5SJeff Kirsher 	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
1881*874aeea5SJeff Kirsher 	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
1882*874aeea5SJeff Kirsher 	/* TX_FILTER_TBL0 is huge and not used by this driver */
1883*874aeea5SJeff Kirsher 	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
1884*874aeea5SJeff Kirsher 	REGISTER_TABLE_CZ(MC_TREG_SMEM),
1885*874aeea5SJeff Kirsher 	/* MSIX_PBA_TABLE is not mapped */
1886*874aeea5SJeff Kirsher 	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
1887*874aeea5SJeff Kirsher 	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1888*874aeea5SJeff Kirsher };
1889*874aeea5SJeff Kirsher 
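/* Report the buffer size needed by efx_nic_get_regs(): one oword per
 * applicable register plus min(step, 16) bytes per row of each
 * applicable table.  The two functions must be kept in step.
 */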
1890*874aeea5SJeff Kirsher size_t efx_nic_get_regs_len(struct efx_nic *efx)
1891*874aeea5SJeff Kirsher {
1892*874aeea5SJeff Kirsher 	const struct efx_nic_reg *reg;
1893*874aeea5SJeff Kirsher 	const struct efx_nic_reg_table *table;
1894*874aeea5SJeff Kirsher 	size_t len = 0;
1895*874aeea5SJeff Kirsher 
1896*874aeea5SJeff Kirsher 	for (reg = efx_nic_regs;
1897*874aeea5SJeff Kirsher 	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1898*874aeea5SJeff Kirsher 	     reg++)
1899*874aeea5SJeff Kirsher 		if (efx->type->revision >= reg->min_revision &&
1900*874aeea5SJeff Kirsher 		    efx->type->revision <= reg->max_revision)
1901*874aeea5SJeff Kirsher 			len += sizeof(efx_oword_t);
1902*874aeea5SJeff Kirsher 
1903*874aeea5SJeff Kirsher 	for (table = efx_nic_reg_tables;
1904*874aeea5SJeff Kirsher 	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1905*874aeea5SJeff Kirsher 	     table++)
1906*874aeea5SJeff Kirsher 		if (efx->type->revision >= table->min_revision &&
1907*874aeea5SJeff Kirsher 		    efx->type->revision <= table->max_revision)
1908*874aeea5SJeff Kirsher 			len += table->rows * min_t(size_t, table->step, 16);
1909*874aeea5SJeff Kirsher 
1910*874aeea5SJeff Kirsher 	return len;
1911*874aeea5SJeff Kirsher }
1912*874aeea5SJeff Kirsher 
1913*874aeea5SJeff Kirsher void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1914*874aeea5SJeff Kirsher {
1915*874aeea5SJeff Kirsher 	const struct efx_nic_reg *reg;
1916*874aeea5SJeff Kirsher 	const struct efx_nic_reg_table *table;
1917*874aeea5SJeff Kirsher 
1918*874aeea5SJeff Kirsher 	for (reg = efx_nic_regs;
1919*874aeea5SJeff Kirsher 	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1920*874aeea5SJeff Kirsher 	     reg++) {
1921*874aeea5SJeff Kirsher 		if (efx->type->revision >= reg->min_revision &&
1922*874aeea5SJeff Kirsher 		    efx->type->revision <= reg->max_revision) {
1923*874aeea5SJeff Kirsher 			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
1924*874aeea5SJeff Kirsher 			buf += sizeof(efx_oword_t);
1925*874aeea5SJeff Kirsher 		}
1926*874aeea5SJeff Kirsher 	}
1927*874aeea5SJeff Kirsher 
1928*874aeea5SJeff Kirsher 	for (table = efx_nic_reg_tables;
1929*874aeea5SJeff Kirsher 	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1930*874aeea5SJeff Kirsher 	     table++) {
1931*874aeea5SJeff Kirsher 		size_t size, i;
1932*874aeea5SJeff Kirsher 
1933*874aeea5SJeff Kirsher 		if (!(efx->type->revision >= table->min_revision &&
1934*874aeea5SJeff Kirsher 		      efx->type->revision <= table->max_revision))
1935*874aeea5SJeff Kirsher 			continue;
1936*874aeea5SJeff Kirsher 
1937*874aeea5SJeff Kirsher 		size = min_t(size_t, table->step, 16);
1938*874aeea5SJeff Kirsher 
1939*874aeea5SJeff Kirsher 		if (table->offset >= efx->type->mem_map_size) {
1940*874aeea5SJeff Kirsher 			/* No longer mapped; return dummy data */
1941*874aeea5SJeff Kirsher 			memcpy(buf, "\xde\xc0\xad\xde", 4);
1942*874aeea5SJeff Kirsher 			buf += table->rows * size;
1943*874aeea5SJeff Kirsher 			continue;
1944*874aeea5SJeff Kirsher 		}
1945*874aeea5SJeff Kirsher 
1946*874aeea5SJeff Kirsher 		for (i = 0; i < table->rows; i++) {
1947*874aeea5SJeff Kirsher 			switch (table->step) {
1948*874aeea5SJeff Kirsher 			case 4: /* 32-bit register or SRAM */
1949*874aeea5SJeff Kirsher 				efx_readd_table(efx, buf, table->offset, i);
1950*874aeea5SJeff Kirsher 				break;
1951*874aeea5SJeff Kirsher 			case 8: /* 64-bit SRAM */
1952*874aeea5SJeff Kirsher 				efx_sram_readq(efx,
1953*874aeea5SJeff Kirsher 					       efx->membase + table->offset,
1954*874aeea5SJeff Kirsher 					       buf, i);
1955*874aeea5SJeff Kirsher 				break;
1956*874aeea5SJeff Kirsher 			case 16: /* 128-bit register */
1957*874aeea5SJeff Kirsher 				efx_reado_table(efx, buf, table->offset, i);
1958*874aeea5SJeff Kirsher 				break;
1959*874aeea5SJeff Kirsher 			case 32: /* 128-bit register, interleaved */
1960*874aeea5SJeff Kirsher 				efx_reado_table(efx, buf, table->offset, 2 * i);
1961*874aeea5SJeff Kirsher 				break;
1962*874aeea5SJeff Kirsher 			default:
1963*874aeea5SJeff Kirsher 				WARN_ON(1);
1964*874aeea5SJeff Kirsher 				return;
1965*874aeea5SJeff Kirsher 			}
1966*874aeea5SJeff Kirsher 			buf += size;
1967*874aeea5SJeff Kirsher 		}
1968*874aeea5SJeff Kirsher 	}
1969*874aeea5SJeff Kirsher }