/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "regs.h"
#include "io.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* We poll for flush-completion events every EFX_FLUSH_INTERVAL ms,
 * up to EFX_FLUSH_POLL_COUNT times.
 */
#define EFX_FLUSH_INTERVAL 10
#define EFX_FLUSH_POLL_COUNT 100
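/* i.e. flush completion is polled for up to ~1 second (10 ms * 100) */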

/* Size and alignment of special buffers (4KB) */
#define EFX_BUF_SIZE 4096

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Generated event code for efx_nic_generate_test_event() */
#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
	(0x00010100 + (_channel)->channel)

/* Generated event code for efx_nic_generate_fill_event() */
#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
	(0x00010200 + (_channel)->channel)
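/* e.g. on channel 3 these evaluate to 0x00010103 and 0x00010203
 * respectively; the low byte identifies the channel.
 */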

/**************************************************************************
 *
 * Solarstorm hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones.  We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords.  This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}

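/* Return whether a and b differ in any of the bits covered by mask */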
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_nic_test_registers(struct efx_nic *efx,
			   const struct efx_nic_register_test *regs,
			   size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	/* Falcon should be in loopback to isolate the XMAC from the PHY */
	WARN_ON(!LOOPBACK_INTERNAL(efx));

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->dma_addr + (i * 4096);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EFX_BUF_SIZE);

	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, GFP_KERNEL);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));

	/* All zeros is a potentially valid event so memset to 0xff */
	memset(buffer->addr, 0xff, len);

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	return 0;
}

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->dma_addr, buffer->len,
		  buffer->addr, (u64)virt_to_phys(buffer->addr));

	dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
			  buffer->dma_addr);
	buffer->addr = NULL;
	buffer->entries = 0;
}

/**************************************************************************
 *
 * Generic buffer handling
 * These buffers are used for interrupt status and MAC stats
 *
 **************************************************************************/

int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			 unsigned int len)
{
	buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
					    &buffer->dma_addr);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	memset(buffer->addr, 0, len);
	return 0;
}

void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
	if (buffer->addr) {
		pci_free_consistent(efx->pci_dev, buffer->len,
				    buffer->addr, buffer->dma_addr);
		buffer->addr = NULL;
	}
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
				    const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}

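/* Decide whether descriptor push may be used for this batch of writes.
 * A non-zero empty_read_count carries the EFX_EMPTY_COUNT_VALID flag;
 * pushing is only worthwhile if the queue was empty at our starting
 * write_count (compared ignoring the VALID flag).
 */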
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	tx_queue->empty_read_count = 0;
	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT, buffer->continuation,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	tx_queue->flushed = FLUSH_NONE;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
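	/* txd.entries counts the 4KB buffer-table pages backing the ring
	 * (512 descriptors each), not descriptors, so __ffs() below
	 * effectively encodes the ring size for the hardware.
	 */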
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			clear_bit_le(tx_queue->queue, (void *)&reg);
		else
			set_bit_le(tx_queue->queue, (void *)&reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}

static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	tx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* The queue should have been flushed */
	WARN_ON(tx_queue->flushed != FLUSH_DONE);

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->flushed = FLUSH_NONE;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
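	/* As with TX, rxd.entries counts 4KB pages, so __ffs() below
	 * effectively encodes the ring size for the hardware.
	 */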
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */,
			      /* For >=B0 this is scatter so disable */
			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	rx_queue->flushed = FLUSH_PENDING;

	/* Post a flush command */
	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* The queue should already have been flushed */
	WARN_ON(rx_queue->flushed != FLUSH_DONE);

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);
	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
			 channel->channel);
}

/* Use HW to insert a SW defined event */
static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
	efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		channel->irq_mod_score += tx_packets;
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		if (efx_dev_registered(efx))
			netif_tx_lock(efx->net_dev);
		efx_notify_tx_desc(tx_queue);
		if (efx_dev_registered(efx))
			netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
		   EFX_WORKAROUND_10727(efx)) {
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				 const efx_qword_t *event,
				 bool *rx_ev_pkt_ok,
				 bool *discard)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* The frame must be discarded if any of these are true. */
	*discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		    rx_ev_tobe_disc | rx_ev_pause_frm);

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef EFX_ENABLE_DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif
}

/* Handle receive events that are not in-order. */
static void
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address.
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, discard = false, checksummed;
	struct efx_rx_queue *rx_queue;

	/* Basic packet information */
	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
	if (unlikely(rx_ev_desc_ptr != expected_ptr))
		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK and packet type is TCP/IP or
		 * UDP/IP, then we can rely on the hardware checksum.
		 */
		checksummed =
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
			rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
	} else {
		efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
		checksummed = false;
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			discard = true;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
		      checksummed, discard);
}

static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code;

	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
		; /* ignore */
	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue. Refill it here */
		efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));
	else
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
}

static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
		efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
		break;
	}
}

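/* Process this channel's event queue, handling at most budget received
 * packets.  Returns the number of RX packets handled.
 */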
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_handle_tx_event(channel, &event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_handle_driver_event(channel, &event);
			break;
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_nic_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}

/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

void efx_nic_init_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.addr, 0xff, channel->eventq.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	efx->type->push_irq_moderation(channel);
}

void efx_nic_fini_eventq(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_nic_generate_test_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

void efx_nic_generate_fill_event(struct efx_channel *channel)
{
	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
	efx_qword_t test_event;

	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_generate_event(channel, &test_event);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/


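/* Scan the event queue looking only for TX/RX flush completions, marking
 * the corresponding queues as flushed.  Other events are discarded, as
 * the queues are about to be destroyed anyway.
 */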
static void efx_poll_flush_events(struct efx_nic *efx)
{
	struct efx_channel *channel = efx_get_channel(efx, 0);
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	unsigned int read_ptr = channel->eventq_read_ptr;
	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;

	do {
		efx_qword_t *event = efx_event(channel, read_ptr);
		int ev_code, ev_sub_code, ev_queue;
		bool ev_failed;

		if (!efx_event_present(event))
			break;

		ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
		ev_sub_code = EFX_QWORD_FIELD(*event,
					      FSF_AZ_DRIVER_EV_SUBCODE);
		if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
		    ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(*event,
						   FSF_AZ_DRIVER_EV_SUBDATA);
			if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
				tx_queue = efx_get_tx_queue(
					efx, ev_queue / EFX_TXQ_TYPES,
					ev_queue % EFX_TXQ_TYPES);
				tx_queue->flushed = FLUSH_DONE;
			}
		} else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
			   ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
			ev_queue = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
			ev_failed = EFX_QWORD_FIELD(
				*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
			if (ev_queue < efx->n_rx_channels) {
				rx_queue = efx_get_rx_queue(efx, ev_queue);
				rx_queue->flushed =
					ev_failed ? FLUSH_FAILED : FLUSH_DONE;
			}
		}

		/* We're about to destroy the queue anyway, so
		 * it's ok to throw away every non-flush event */
		EFX_SET_QWORD(*event);

		++read_ptr;
	} while (read_ptr != end_ptr);

	channel->eventq_read_ptr = read_ptr;
}

/* Handle tx and rx flushes at the same time, since they run in
 * parallel in the hardware and there's no reason for us to
 * serialise them */
int efx_nic_flush_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int i, tx_pending, rx_pending;

	/* If necessary prepare the hardware for flushing */
	efx->type->prepare_flush(efx);

	/* Flush all tx queues in parallel */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised)
				efx_flush_tx_queue(tx_queue);
		}
	}

	/* The hardware supports four concurrent rx flushes, each of which may
	 * need to be retried if there is an outstanding descriptor fetch */
	for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) {
		rx_pending = tx_pending = 0;
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_queue->flushed == FLUSH_PENDING)
					++rx_pending;
			}
		}
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (rx_pending == EFX_RX_FLUSH_COUNT)
					break;
				if (rx_queue->flushed == FLUSH_FAILED ||
				    rx_queue->flushed == FLUSH_NONE) {
					efx_flush_rx_queue(rx_queue);
					++rx_pending;
				}
			}
			efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
				if (tx_queue->initialised &&
				    tx_queue->flushed != FLUSH_DONE)
					++tx_pending;
			}
		}

		if (rx_pending == 0 && tx_pending == 0)
			return 0;

		msleep(EFX_FLUSH_INTERVAL);
		efx_poll_flush_events(efx);
	}

	/* Mark the queues as all flushed. We're going to return failure
	 * leading to a reset, or fake up success anyway */
	efx_for_each_channel(channel, efx) {
		efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
			if (tx_queue->initialised &&
			    tx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "tx queue %d flush command timed out\n",
					  tx_queue->queue);
			tx_queue->flushed = FLUSH_DONE;
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			if (rx_queue->flushed != FLUSH_DONE)
				netif_err(efx, hw, efx->net_dev,
					  "rx queue %d flush command timed out\n",
					  efx_rx_queue_index(rx_queue));
			rx_queue->flushed = FLUSH_DONE;
		}
	}

	return -ETIMEDOUT;
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
				      bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_nic_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	/* Enable interrupts */
	efx_nic_interrupts(efx, true, false);

	/* Force processing of all the channels to get the EVQ RPTRs up to
	   date */
	efx_for_each_channel(channel, efx)
		efx_schedule_channel(channel);
}

void efx_nic_disable_interrupts(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_nic_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_nic_generate_interrupt(struct efx_nic *efx)
{
	efx_nic_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		efx_oword_t reg;
		efx_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
			  EFX_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (efx_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	efx_nic_disable_interrupts(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen. "
			  "NIC will be disabled\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}

/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedules event queue processing.
 */
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	efx_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;
	int syserr;

	/* Could this be ours?  If interrupts are disabled then the
	 * channel state may not be valid.
	 */
	if (!efx->legacy_irq_enabled)
		return result;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EFX_EXTRACT_DWORD(reg, 0, 31);

	/* Check to see if we have a serious error condition */
	if (queues & (1U << efx->fatal_irq_level)) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	if (queues != 0) {
		if (EFX_WORKAROUND_15783(efx))
			efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel(channel);
			queues >>= 1;
		}
		result = IRQ_HANDLED;

	} else if (EFX_WORKAROUND_15783(efx)) {
		efx_qword_t *event;

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		efx_for_each_channel(channel, efx) {
			event = efx_event(channel, channel->eventq_read_ptr);
			if (efx_event_present(event))
				efx_schedule_channel(channel);
			else
				efx_nic_eventq_read_ack(channel);
		}
	}

	if (result == IRQ_HANDLED) {
		efx->last_irq_cpu = raw_smp_processor_id();
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
	}

	return result;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
	struct efx_channel *channel = *(struct efx_channel **)dev_id;
	struct efx_nic *efx = channel->efx;
	efx_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	efx->last_irq_cpu = raw_smp_processor_id();
	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));

	/* Check to see if we have a serious error condition */
	if (channel->channel == efx->fatal_irq_level) {
		syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return efx_nic_fatal_interrupt(efx);
	}

	/* Schedule processing of the channel */
	efx_schedule_channel(channel);

	return IRQ_HANDLED;
}


/* Set up the RSS indirection table.
 * This maps from the packet's hash value to an RX queue.
 */
1513 void efx_nic_push_rx_indir_table(struct efx_nic *efx)
1514 {
1515 	size_t i = 0;
1516 	efx_dword_t dword;
1517 
1518 	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1519 		return;
1520 
1521 	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1522 		     FR_BZ_RX_INDIRECTION_TBL_ROWS);
1523 
1524 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
1525 		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1526 				     efx->rx_indir_table[i]);
1527 		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
1528 	}
1529 }
1530 
1531 /* Hook interrupt handler(s)
1532  * Try MSI and then legacy interrupts.
1533  */
1534 int efx_nic_init_interrupt(struct efx_nic *efx)
1535 {
1536 	struct efx_channel *channel;
1537 	int rc;
1538 
1539 	if (!EFX_INT_MODE_USE_MSI(efx)) {
1540 		irq_handler_t handler;
1541 		if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1542 			handler = efx_legacy_interrupt;
1543 		else
1544 			handler = falcon_legacy_interrupt_a1;
1545 
1546 		rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
1547 				 efx->name, efx);
1548 		if (rc) {
1549 			netif_err(efx, drv, efx->net_dev,
1550 				  "failed to hook legacy IRQ %d\n",
1551 				  efx->pci_dev->irq);
1552 			goto fail1;
1553 		}
1554 		return 0;
1555 	}
1556 
1557 	/* Hook MSI or MSI-X interrupt */
1558 	efx_for_each_channel(channel, efx) {
1559 		rc = request_irq(channel->irq, efx_msi_interrupt,
1560 				 IRQF_PROBE_SHARED, /* Not shared */
1561 				 efx->channel_name[channel->channel],
1562 				 &efx->channel[channel->channel]);
1563 		if (rc) {
1564 			netif_err(efx, drv, efx->net_dev,
1565 				  "failed to hook IRQ %d\n", channel->irq);
1566 			goto fail2;
1567 		}
1568 	}
1569 
1570 	return 0;
1571 
1572  fail2:
1573 	efx_for_each_channel(channel, efx)
1574 		free_irq(channel->irq, &efx->channel[channel->channel]);
1575  fail1:
1576 	return rc;
1577 }
1578 
1579 void efx_nic_fini_interrupt(struct efx_nic *efx)
1580 {
1581 	struct efx_channel *channel;
1582 	efx_oword_t reg;
1583 
1584 	/* Disable MSI/MSI-X interrupts */
1585 	efx_for_each_channel(channel, efx) {
1586 		if (channel->irq)
1587 			free_irq(channel->irq, &efx->channel[channel->channel]);
1588 	}
1589 
1590 	/* ACK legacy interrupt */
1591 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1592 		efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1593 	else
1594 		falcon_irq_ack_a1(efx);
1595 
1596 	/* Disable legacy interrupt */
1597 	if (efx->legacy_irq)
1598 		free_irq(efx->legacy_irq, efx);
1599 }
1600 
1601 u32 efx_nic_fpga_ver(struct efx_nic *efx)
1602 {
1603 	efx_oword_t altera_build;
1604 	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
1605 	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
1606 }
1607 
1608 void efx_nic_init_common(struct efx_nic *efx)
1609 {
1610 	efx_oword_t temp;
1611 
1612 	/* Set positions of descriptor caches in SRAM. */
1613 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR,
1614 			     efx->type->tx_dc_base / 8);
1615 	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
1616 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR,
1617 			     efx->type->rx_dc_base / 8);
1618 	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
1619 
1620 	/* Set TX descriptor cache size. */
1621 	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
1622 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
1623 	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
1624 
1625 	/* Set RX descriptor cache size.  Set low watermark to size-8, as
1626 	 * this allows most efficient prefetching.
1627 	 */
1628 	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
1629 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
1630 	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
1631 	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
1632 	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
1633 
1634 	/* Program INT_KER address */
1635 	EFX_POPULATE_OWORD_2(temp,
1636 			     FRF_AZ_NORM_INT_VEC_DIS_KER,
1637 			     EFX_INT_MODE_USE_MSI(efx),
1638 			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1639 	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1640 
1641 	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1642 		/* Use an interrupt level unused by event queues */
1643 		efx->fatal_irq_level = 0x1f;
1644 	else
1645 		/* Use a valid MSI-X vector */
1646 		efx->fatal_irq_level = 0;
1647 
1648 	/* Enable all the genuinely fatal interrupts.  (They are still
1649 	 * masked by the overall interrupt mask, controlled by
1650 	 * falcon_interrupts()).
1651 	 *
1652 	 * Note: All other fatal interrupts are enabled
1653 	 */
1654 	EFX_POPULATE_OWORD_3(temp,
1655 			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1656 			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1657 			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1658 	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1659 		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1660 	EFX_INVERT_OWORD(temp);
1661 	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1662 
1663 	efx_nic_push_rx_indir_table(efx);
1664 
1665 	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
1666 	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
1667 	 */
1668 	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
1669 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1670 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1671 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1672 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1673 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1674 	/* Enable SW_EV so the char driver can inherit it; assumed harmless here */
1675 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
1676 	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
1677 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
1678 	/* Disable hardware watchdog which can misfire */
1679 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
1680 	/* Squash TX of packets of 16 bytes or less */
1681 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1682 		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
1683 	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
1684 
1685 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1686 		EFX_POPULATE_OWORD_4(temp,
1687 				     /* Default values */
1688 				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
1689 				     FRF_BZ_TX_PACE_SB_AF, 0xb,
1690 				     FRF_BZ_TX_PACE_FB_BASE, 0,
1691 				     /* Allow large pace values in the
1692 				      * fast bin. */
1693 				     FRF_BZ_TX_PACE_BIN_TH,
1694 				     FFE_BZ_TX_PACE_RESERVED);
1695 		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
1696 	}
1697 }
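
/* A note on the register idioms above (as defined in bitfield.h):
 * EFX_POPULATE_OWORD_n() builds a 128-bit value from scratch, zeroing
 * every field not listed, whereas the efx_reado() /
 * EFX_SET_OWORD_FIELD() / efx_writeo() sequence used for
 * FR_AZ_TX_RESERVED is a read-modify-write that preserves the
 * unlisted fields.
 */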
1698 
1699 /* Register dump */
1700 
1701 #define REGISTER_REVISION_A	1
1702 #define REGISTER_REVISION_B	2
1703 #define REGISTER_REVISION_C	3
1704 #define REGISTER_REVISION_Z	3	/* latest revision */
1705 
1706 struct efx_nic_reg {
1707 	u32 offset:24;
1708 	u32 min_revision:2, max_revision:2;
1709 };
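
/* Field widths, for reference: offset:24 covers the whole register BAR,
 * and the 2-bit revision fields hold the values 1..3 defined by
 * REGISTER_REVISION_A..REGISTER_REVISION_Z below.
 */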
1710 
1711 #define REGISTER(name, min_rev, max_rev) {				\
1712 	FR_ ## min_rev ## max_rev ## _ ## name,				\
1713 	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev	\
1714 }
1715 #define REGISTER_AA(name) REGISTER(name, A, A)
1716 #define REGISTER_AB(name) REGISTER(name, A, B)
1717 #define REGISTER_AZ(name) REGISTER(name, A, Z)
1718 #define REGISTER_BB(name) REGISTER(name, B, B)
1719 #define REGISTER_BZ(name) REGISTER(name, B, Z)
1720 #define REGISTER_CZ(name) REGISTER(name, C, Z)
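
/* Example expansion (illustrative): REGISTER_BZ(DP_CTRL) pastes tokens
 * to produce
 *	{ FR_BZ_DP_CTRL, REGISTER_REVISION_B, REGISTER_REVISION_Z }
 * i.e. { FR_BZ_DP_CTRL, 2, 3 }, letting the dump code filter each
 * entry against the NIC's revision.
 */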
1721 
1722 static const struct efx_nic_reg efx_nic_regs[] = {
1723 	REGISTER_AZ(ADR_REGION),
1724 	REGISTER_AZ(INT_EN_KER),
1725 	REGISTER_BZ(INT_EN_CHAR),
1726 	REGISTER_AZ(INT_ADR_KER),
1727 	REGISTER_BZ(INT_ADR_CHAR),
1728 	/* INT_ACK_KER is WO (write-only) */
1729 	/* INT_ISR0 is RC (read-to-clear) */
1730 	REGISTER_AZ(HW_INIT),
1731 	REGISTER_CZ(USR_EV_CFG),
1732 	REGISTER_AB(EE_SPI_HCMD),
1733 	REGISTER_AB(EE_SPI_HADR),
1734 	REGISTER_AB(EE_SPI_HDATA),
1735 	REGISTER_AB(EE_BASE_PAGE),
1736 	REGISTER_AB(EE_VPD_CFG0),
1737 	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
1738 	/* PMBX_DBG_IADDR and PMBX_DBG_IDATA are indirect */
1739 	/* PCIE_CORE_INDIRECT is indirect */
1740 	REGISTER_AB(NIC_STAT),
1741 	REGISTER_AB(GPIO_CTL),
1742 	REGISTER_AB(GLB_CTL),
1743 	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
1744 	REGISTER_BZ(DP_CTRL),
1745 	REGISTER_AZ(MEM_STAT),
1746 	REGISTER_AZ(CS_DEBUG),
1747 	REGISTER_AZ(ALTERA_BUILD),
1748 	REGISTER_AZ(CSR_SPARE),
1749 	REGISTER_AB(PCIE_SD_CTL0123),
1750 	REGISTER_AB(PCIE_SD_CTL45),
1751 	REGISTER_AB(PCIE_PCS_CTL_STAT),
1752 	/* DEBUG_DATA_OUT is not used */
1753 	/* DRV_EV is WO */
1754 	REGISTER_AZ(EVQ_CTL),
1755 	REGISTER_AZ(EVQ_CNT1),
1756 	REGISTER_AZ(EVQ_CNT2),
1757 	REGISTER_AZ(BUF_TBL_CFG),
1758 	REGISTER_AZ(SRM_RX_DC_CFG),
1759 	REGISTER_AZ(SRM_TX_DC_CFG),
1760 	REGISTER_AZ(SRM_CFG),
1761 	/* BUF_TBL_UPD is WO */
1762 	REGISTER_AZ(SRM_UPD_EVQ),
1763 	REGISTER_AZ(SRAM_PARITY),
1764 	REGISTER_AZ(RX_CFG),
1765 	REGISTER_BZ(RX_FILTER_CTL),
1766 	/* RX_FLUSH_DESCQ is WO */
1767 	REGISTER_AZ(RX_DC_CFG),
1768 	REGISTER_AZ(RX_DC_PF_WM),
1769 	REGISTER_BZ(RX_RSS_TKEY),
1770 	/* RX_NODESC_DROP is RC */
1771 	REGISTER_AA(RX_SELF_RST),
1772 	/* RX_DEBUG, RX_PUSH_DROP are not used */
1773 	REGISTER_CZ(RX_RSS_IPV6_REG1),
1774 	REGISTER_CZ(RX_RSS_IPV6_REG2),
1775 	REGISTER_CZ(RX_RSS_IPV6_REG3),
1776 	/* TX_FLUSH_DESCQ is WO */
1777 	REGISTER_AZ(TX_DC_CFG),
1778 	REGISTER_AA(TX_CHKSM_CFG),
1779 	REGISTER_AZ(TX_CFG),
1780 	/* TX_PUSH_DROP is not used */
1781 	REGISTER_AZ(TX_RESERVED),
1782 	REGISTER_BZ(TX_PACE),
1783 	/* TX_PACE_DROP_QID is RC */
1784 	REGISTER_BB(TX_VLAN),
1785 	REGISTER_BZ(TX_IPFIL_PORTEN),
1786 	REGISTER_AB(MD_TXD),
1787 	REGISTER_AB(MD_RXD),
1788 	REGISTER_AB(MD_CS),
1789 	REGISTER_AB(MD_PHY_ADR),
1790 	REGISTER_AB(MD_ID),
1791 	/* MD_STAT is RC */
1792 	REGISTER_AB(MAC_STAT_DMA),
1793 	REGISTER_AB(MAC_CTRL),
1794 	REGISTER_BB(GEN_MODE),
1795 	REGISTER_AB(MAC_MC_HASH_REG0),
1796 	REGISTER_AB(MAC_MC_HASH_REG1),
1797 	REGISTER_AB(GM_CFG1),
1798 	REGISTER_AB(GM_CFG2),
1799 	/* GM_IPG and GM_HD are not used */
1800 	REGISTER_AB(GM_MAX_FLEN),
1801 	/* GM_TEST is not used */
1802 	REGISTER_AB(GM_ADR1),
1803 	REGISTER_AB(GM_ADR2),
1804 	REGISTER_AB(GMF_CFG0),
1805 	REGISTER_AB(GMF_CFG1),
1806 	REGISTER_AB(GMF_CFG2),
1807 	REGISTER_AB(GMF_CFG3),
1808 	REGISTER_AB(GMF_CFG4),
1809 	REGISTER_AB(GMF_CFG5),
1810 	REGISTER_BB(TX_SRC_MAC_CTL),
1811 	REGISTER_AB(XM_ADR_LO),
1812 	REGISTER_AB(XM_ADR_HI),
1813 	REGISTER_AB(XM_GLB_CFG),
1814 	REGISTER_AB(XM_TX_CFG),
1815 	REGISTER_AB(XM_RX_CFG),
1816 	REGISTER_AB(XM_MGT_INT_MASK),
1817 	REGISTER_AB(XM_FC),
1818 	REGISTER_AB(XM_PAUSE_TIME),
1819 	REGISTER_AB(XM_TX_PARAM),
1820 	REGISTER_AB(XM_RX_PARAM),
1821 	/* XM_MGT_INT_MSK (note no 'A') is RC */
1822 	REGISTER_AB(XX_PWR_RST),
1823 	REGISTER_AB(XX_SD_CTL),
1824 	REGISTER_AB(XX_TXDRV_CTL),
1825 	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
1826 	/* XX_CORE_STAT is partly RC */
1827 };
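
/* The dump layout is defined by the order of this array combined with
 * the revision filter applied in efx_nic_get_regs_len() and
 * efx_nic_get_regs() below; a tool decoding the dump must apply the
 * same table and filter to know which oword belongs to which register.
 */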
1828 
1829 struct efx_nic_reg_table {
1830 	u32 offset:24;
1831 	u32 min_revision:2, max_revision:2;
1832 	u32 step:6, rows:21;
1833 };
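
/* Field widths: step:6 allows strides up to 63 bytes (32 is the largest
 * used below) and rows:21 allows about 2M rows, ample for the 1024-row
 * buffer-table slices declared later.
 */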
1834 
1835 #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \
1836 	offset,								\
1837 	REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev,	\
1838 	step, rows							\
1839 }
1840 #define REGISTER_TABLE(name, min_rev, max_rev) 				\
1841 	REGISTER_TABLE_DIMENSIONS(					\
1842 		name, FR_ ## min_rev ## max_rev ## _ ## name,		\
1843 		min_rev, max_rev,					\
1844 		FR_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
1845 		FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
1846 #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A)
1847 #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z)
1848 #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B)
1849 #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z)
1850 #define REGISTER_TABLE_BB_CZ(name)					\
1851 	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B,		\
1852 				  FR_BZ_ ## name ## _STEP,		\
1853 				  FR_BB_ ## name ## _ROWS),		\
1854 	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z,		\
1855 				  FR_BZ_ ## name ## _STEP,		\
1856 				  FR_CZ_ ## name ## _ROWS)
1857 #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z)
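
/* Example expansion (illustrative): REGISTER_TABLE_BB_CZ(TIMER_TBL)
 * emits two array entries sharing the FR_BZ_ offset and step:
 *	{ FR_BZ_TIMER_TBL, B, B, FR_BZ_TIMER_TBL_STEP, FR_BB_TIMER_TBL_ROWS }
 *	{ FR_BZ_TIMER_TBL, C, Z, FR_BZ_TIMER_TBL_STEP, FR_CZ_TIMER_TBL_ROWS }
 * (revision letters shown in place of their numeric values), covering a
 * table whose row count changed between revisions B and C while its
 * layout stayed the same.
 */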
1858 
1859 static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
1860 	/* DRIVER is not used */
1861 	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
1862 	REGISTER_TABLE_BB(TX_IPFIL_TBL),
1863 	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
1864 	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
1865 	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
1866 	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
1867 	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
1868 	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
1869 	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
1870 	/* We can't reasonably read all of the buffer table (up to 8MB!).
1871 	 * However this driver will only use a few entries.  Reading
1872 	 * 1K entries allows for some expansion of queue count and
1873 	 * size before we need to change the register-dump version. */
1874 	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
1875 				  A, A, 8, 1024),
1876 	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
1877 				  B, Z, 8, 1024),
1878 	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
1879 	REGISTER_TABLE_BB_CZ(TIMER_TBL),
1880 	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
1881 	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
1882 	/* TX_FILTER_TBL0 is huge and not used by this driver */
1883 	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
1884 	REGISTER_TABLE_CZ(MC_TREG_SMEM),
1885 	/* MSIX_PBA_TABLE is not mapped */
1886 	/* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */
1887 	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
1888 };
1889 
1890 size_t efx_nic_get_regs_len(struct efx_nic *efx)
1891 {
1892 	const struct efx_nic_reg *reg;
1893 	const struct efx_nic_reg_table *table;
1894 	size_t len = 0;
1895 
1896 	for (reg = efx_nic_regs;
1897 	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1898 	     reg++)
1899 		if (efx->type->revision >= reg->min_revision &&
1900 		    efx->type->revision <= reg->max_revision)
1901 			len += sizeof(efx_oword_t);
1902 
1903 	for (table = efx_nic_reg_tables;
1904 	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1905 	     table++)
1906 		if (efx->type->revision >= table->min_revision &&
1907 		    efx->type->revision <= table->max_revision)
1908 			len += table->rows * min_t(size_t, table->step, 16);
1909 
1910 	return len;
1911 }
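
/* Worked example (illustrative): each 1024-row buffer-table slice above
 * contributes 1024 * min(8, 16) == 8192 bytes on the revisions where it
 * applies, while a 32-byte-step interleaved table contributes only
 * rows * 16 bytes, because each per-row read is capped at
 * sizeof(efx_oword_t) == 16.
 */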
1912 
1913 void efx_nic_get_regs(struct efx_nic *efx, void *buf)
1914 {
1915 	const struct efx_nic_reg *reg;
1916 	const struct efx_nic_reg_table *table;
1917 
1918 	for (reg = efx_nic_regs;
1919 	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
1920 	     reg++) {
1921 		if (efx->type->revision >= reg->min_revision &&
1922 		    efx->type->revision <= reg->max_revision) {
1923 			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
1924 			buf += sizeof(efx_oword_t);
1925 		}
1926 	}
1927 
1928 	for (table = efx_nic_reg_tables;
1929 	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
1930 	     table++) {
1931 		size_t size, i;
1932 
1933 		if (!(efx->type->revision >= table->min_revision &&
1934 		      efx->type->revision <= table->max_revision))
1935 			continue;
1936 
1937 		size = min_t(size_t, table->step, 16);
1938 
1939 		if (table->offset >= efx->type->mem_map_size) {
1940 			/* No longer mapped; return dummy data */
1941 			memcpy(buf, "\xde\xc0\xad\xde", 4);
1942 			buf += table->rows * size;
1943 			continue;
1944 		}
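
		/* Only the 4-byte 0xdeadc0de marker is written above; the
		 * rest of the skipped region is left untouched, which
		 * assumes the caller handed us a zeroed buffer (the
		 * ethtool core allocates the regs buffer with vzalloc(),
		 * so this holds in practice).
		 */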
1945 
1946 		for (i = 0; i < table->rows; i++) {
1947 			switch (table->step) {
1948 			case 4: /* 32-bit register or SRAM */
1949 				efx_readd_table(efx, buf, table->offset, i);
1950 				break;
1951 			case 8: /* 64-bit SRAM */
1952 				efx_sram_readq(efx,
1953 					       efx->membase + table->offset,
1954 					       buf, i);
1955 				break;
1956 			case 16: /* 128-bit register */
1957 				efx_reado_table(efx, buf, table->offset, i);
1958 				break;
1959 			case 32: /* 128-bit register, interleaved: rows 32 bytes apart, so read every other 16-byte row */
1960 				efx_reado_table(efx, buf, table->offset, 2 * i);
1961 				break;
1962 			default:
1963 				WARN_ON(1);
1964 				return;
1965 			}
1966 			buf += size;
1967 		}
1968 	}
1969 }
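
/* Hedged usage sketch (locals are illustrative, not from this file): an
 * ethtool get_regs implementation would pair the two helpers roughly as
 *
 *	size_t len = efx_nic_get_regs_len(efx);
 *	void *buf = vzalloc(len);
 *
 *	if (buf) {
 *		efx_nic_get_regs(efx, buf);
 *		... copy len bytes from buf to the caller ...
 *		vfree(buf);
 *	}
 */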
1970