xref: /openbmc/linux/drivers/net/ethernet/sfc/ef10.c (revision 089a49b6)
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2012-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef10_regs.h"
#include "io.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "nic.h"
#include "workarounds.h"
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Hardware control for EF10 architecture including 'Huntington'. */

#define EFX_EF10_DRVGEN_EV		7
enum {
	EFX_EF10_TEST = 1,
	EFX_EF10_REFILL,
};

/* The reserved RSS context value */
#define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff

/* The filter table(s) are managed by firmware and we have write-only
 * access.  When removing filters we must identify them to the
 * firmware by a 64-bit handle, but this is too wide for Linux kernel
 * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
 * be able to tell in advance whether a requested insertion will
 * replace an existing filter.  Therefore we maintain a software hash
 * table, which should be at least as large as the hardware hash
 * table.
 *
 * Huntington has a single 8K filter table shared between all filter
 * types and both ports.
 */
#define HUNT_FILTER_TBL_ROWS 8192
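
/* Illustration only, not necessarily the scheme this driver uses: given a
 * single 8K table, one plausible way to derive the narrow filter IDs that
 * the kernel interfaces need is to fold the match priority together with
 * the table row, so an ID fits comfortably in 32 (or even 16) bits.  The
 * helper below is a hypothetical sketch under that assumption.
 */
static inline u32 efx_ef10_example_filter_id(unsigned int match_pri,
					     unsigned int index)
{
	/* index < HUNT_FILTER_TBL_ROWS, so IDs for successive priorities
	 * occupy disjoint ranges */
	return match_pri * HUNT_FILTER_TBL_ROWS + index;
}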

struct efx_ef10_filter_table {
/* The RX match field masks supported by this fw & hw, in order of priority */
	enum efx_filter_match_flags rx_match_flags[
		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
	unsigned int rx_match_count;

	struct {
		unsigned long spec;	/* pointer to spec plus flag bits */
/* BUSY flag indicates that an update is in progress.  STACK_OLD is
 * used to mark and sweep stack-owned MAC filters.
 */
#define EFX_EF10_FILTER_FLAG_BUSY	1UL
#define EFX_EF10_FILTER_FLAG_STACK_OLD	2UL
#define EFX_EF10_FILTER_FLAGS		3UL
		u64 handle;		/* firmware handle */
	} *entry;
	wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
#define EFX_EF10_FILTER_STACK_UC_MAX	32
#define EFX_EF10_FILTER_STACK_MC_MAX	256
	struct {
		u8 addr[ETH_ALEN];
		u16 id;
	} stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
	  stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
	int stack_uc_count;		/* negative for PROMISC */
	int stack_mc_count;		/* negative for PROMISC/ALLMULTI */
};

/* An arbitrary search limit for the software hash table */
#define EFX_EF10_FILTER_SEARCH_LIMIT 200

static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);

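/* The MC publishes its warm boot count through a shared status register:
 * word 1 holds the magic 0xb007 (hex-speak for "boot") and word 0 holds
 * the count itself.  If the magic is absent, the count cannot be trusted
 * yet, which the helper below reports as -EIO.
 */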
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
}

static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
{
	return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
}

static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf)) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps =
		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
		netif_err(efx, drv, efx->net_dev,
			  "current firmware does not support TSO\n");
		return -ENODEV;
	}

	if (!(nic_data->datapath_caps &
	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
		netif_err(efx, probe, efx->net_dev,
			  "current firmware does not support an RX prefix\n");
		return -ENODEV;
	}

	return 0;
}

static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;
	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	return rc > 0 ? rc : -ERANGE;
}

static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	memcpy(mac_address,
	       MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
	return 0;
}

static int efx_ef10_probe(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data;
	int i, rc;

	/* We can have one VI for each 8K region.  However we need
	 * multiple TX queues per channel.
	 */
	efx->max_channels =
		min_t(unsigned int,
		      EFX_MAX_CHANNELS,
		      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
	BUG_ON(efx->max_channels == 0);

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;

	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
	if (rc)
		goto fail1;

	/* Get the MC's warm boot count.  In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = efx_ef10_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail2;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;

	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function.  We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);

	rc = efx_mcdi_init(efx);
	if (rc)
		goto fail2;

	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail3;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail3;

	rc = efx_ef10_init_datapath_caps(efx);
	if (rc < 0)
		goto fail3;

	efx->rx_packet_len_offset =
		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail3;
	efx->port_num = rc;

	rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
	if (rc)
		goto fail3;

	rc = efx_ef10_get_sysclk_freq(efx);
	if (rc < 0)
		goto fail3;
	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
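	/* (Worked example, assuming the firmware reports the clock in MHz:
	 * a 200 MHz sysclk would give 1536000 / 200 = 7680 ns per quantum.)
	 */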

	/* Check whether firmware supports bug 35388 workaround */
	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
	if (rc == 0)
		nic_data->workaround_35388 = true;
	else if (rc != -ENOSYS && rc != -ENOENT)
		goto fail3;
	netif_dbg(efx, probe, efx->net_dev,
		  "workaround for bug 35388 is %sabled\n",
		  nic_data->workaround_35388 ? "en" : "dis");

	rc = efx_mcdi_mon_probe(efx);
	if (rc)
		goto fail3;

	return 0;

fail3:
	efx_mcdi_fini(efx);
fail2:
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
fail1:
	kfree(nic_data);
	efx->nic_data = NULL;
	return rc;
}

static int efx_ef10_free_vis(struct efx_nic *efx)
{
	int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);

	/* -EALREADY means nothing to free, so ignore */
	if (rc == -EALREADY)
		rc = 0;
	return rc;
}

static void efx_ef10_remove(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	efx_mcdi_mon_remove(efx);

	/* This needs to happen after efx_ptp_remove_channel(), while no
	 * filters are installed.
	 */
	efx_ef10_rx_free_indir_table(efx);

	rc = efx_ef10_free_vis(efx);
	WARN_ON(rc != 0);

	efx_mcdi_fini(efx);
	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
}

static int efx_ef10_alloc_vis(struct efx_nic *efx,
			      unsigned int min_vis, unsigned int max_vis)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
		return -EIO;

	netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));

	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
	return 0;
}

static int efx_ef10_dimension_resources(struct efx_nic *efx)
{
	unsigned int n_vis =
		max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

	return efx_ef10_alloc_vis(efx, n_vis, n_vis);
}

static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	if (nic_data->must_check_datapath_caps) {
		rc = efx_ef10_init_datapath_caps(efx);
		if (rc)
			return rc;
		nic_data->must_check_datapath_caps = false;
	}

	if (nic_data->must_realloc_vis) {
		/* We cannot let the number of VIs change now */
		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
					nic_data->n_allocated_vis);
		if (rc)
			return rc;
		nic_data->must_realloc_vis = false;
	}

	efx_ef10_rx_push_indir_table(efx);
	return 0;
}

static int efx_ef10_map_reset_flags(u32 *flags)
{
	enum {
		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
				   ETH_RESET_SHARED_SHIFT),
		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
				 ETH_RESET_SHARED_SHIFT)
	};

	/* We assume for now that our PCI function is permitted to
	 * reset everything.
	 */

	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
		*flags &= ~EF10_RESET_MC;
		return RESET_TYPE_WORLD;
	}

	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
		*flags &= ~EF10_RESET_PORT;
		return RESET_TYPE_ALL;
	}

	/* no invisible reset implemented */

	return -EINVAL;
}

#define EF10_DMA_STAT(ext_name, mcdi_name)			\
	[EF10_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
	[EF10_STAT_ ## int_name] =				\
	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
#define EF10_OTHER_STAT(ext_name)				\
	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }

static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
	EF10_DMA_STAT(tx_bytes, TX_BYTES),
	EF10_DMA_STAT(tx_packets, TX_PKTS),
	EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
	EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
	EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
	EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
	EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
	EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
	EF10_DMA_STAT(tx_64, TX_64_PKTS),
	EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
	EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
	EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
	EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_bytes, RX_BYTES),
	EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
	EF10_OTHER_STAT(rx_good_bytes),
	EF10_OTHER_STAT(rx_bad_bytes),
	EF10_DMA_STAT(rx_packets, RX_PKTS),
	EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
	EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
	EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
	EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
	EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
	EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
	EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
	EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
	EF10_DMA_STAT(rx_64, RX_64_PKTS),
	EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
	EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
	EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
	EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
};

#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
			       (1ULL << EF10_STAT_tx_packets) |		\
			       (1ULL << EF10_STAT_tx_pause) |		\
			       (1ULL << EF10_STAT_tx_unicast) |		\
			       (1ULL << EF10_STAT_tx_multicast) |	\
			       (1ULL << EF10_STAT_tx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_bytes) |		\
			       (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
			       (1ULL << EF10_STAT_rx_good_bytes) |	\
			       (1ULL << EF10_STAT_rx_bad_bytes) |	\
			       (1ULL << EF10_STAT_rx_packets) |		\
			       (1ULL << EF10_STAT_rx_good) |		\
			       (1ULL << EF10_STAT_rx_bad) |		\
			       (1ULL << EF10_STAT_rx_pause) |		\
			       (1ULL << EF10_STAT_rx_control) |		\
			       (1ULL << EF10_STAT_rx_unicast) |		\
			       (1ULL << EF10_STAT_rx_multicast) |	\
			       (1ULL << EF10_STAT_rx_broadcast) |	\
			       (1ULL << EF10_STAT_rx_lt64) |		\
			       (1ULL << EF10_STAT_rx_64) |		\
			       (1ULL << EF10_STAT_rx_65_to_127) |	\
			       (1ULL << EF10_STAT_rx_128_to_255) |	\
			       (1ULL << EF10_STAT_rx_256_to_511) |	\
			       (1ULL << EF10_STAT_rx_512_to_1023) |	\
			       (1ULL << EF10_STAT_rx_1024_to_15xx) |	\
			       (1ULL << EF10_STAT_rx_15xx_to_jumbo) |	\
			       (1ULL << EF10_STAT_rx_gtjumbo) |		\
			       (1ULL << EF10_STAT_rx_bad_gtjumbo) |	\
			       (1ULL << EF10_STAT_rx_overflow) |	\
			       (1ULL << EF10_STAT_rx_nodesc_drops))

/* These statistics are only provided by the 10G MAC.  For a 10G/40G
 * switchable port we do not expose these because they might not
 * include all the packets they should.
 */
#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |	\
				 (1ULL << EF10_STAT_tx_lt64) |		\
				 (1ULL << EF10_STAT_tx_64) |		\
				 (1ULL << EF10_STAT_tx_65_to_127) |	\
				 (1ULL << EF10_STAT_tx_128_to_255) |	\
				 (1ULL << EF10_STAT_tx_256_to_511) |	\
				 (1ULL << EF10_STAT_tx_512_to_1023) |	\
				 (1ULL << EF10_STAT_tx_1024_to_15xx) |	\
				 (1ULL << EF10_STAT_tx_15xx_to_jumbo))

/* These statistics are only provided by the 40G MAC.  For a 10G/40G
 * switchable port we do expose these because the errors will otherwise
 * be silent.
 */
#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
				  (1ULL << EF10_STAT_rx_length_error))

#if BITS_PER_LONG == 64
#define STAT_MASK_BITMAP(bits) (bits)
#else
#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
#endif
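
/* For example, on a 32-bit build STAT_MASK_BITMAP(0x300000001ULL) expands
 * to the two initialisers 0x00000001, 0x00000003, filling two consecutive
 * unsigned longs of the bitmap (low word first).
 */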

static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
{
	static const unsigned long hunt_40g_stat_mask[] = {
		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
				 HUNT_40G_EXTRA_STAT_MASK)
	};
	static const unsigned long hunt_10g_only_stat_mask[] = {
		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
				 HUNT_10G_ONLY_STAT_MASK)
	};
	u32 port_caps = efx_mcdi_phy_get_caps(efx);

	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
		return hunt_40g_stat_mask;
	else
		return hunt_10g_only_stat_mask;
}

static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
{
	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
				      efx_ef10_stat_mask(efx), names);
}

static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
	__le64 generation_start, generation_end;
	u64 *stats = nic_data->stats;
	__le64 *dma_stats;

	dma_stats = efx->stats_buffer.addr;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
		return 0;
	rmb();
	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
			     stats, efx->stats_buffer.addr, false);
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	/* Update derived statistics */
	stats[EF10_STAT_rx_good_bytes] =
		stats[EF10_STAT_rx_bytes] -
		stats[EF10_STAT_rx_bytes_minus_good_bytes];
	efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
			     stats[EF10_STAT_rx_bytes_minus_good_bytes]);

	return 0;
}

static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
				    struct rtnl_link_stats64 *core_stats)
{
	const unsigned long *mask = efx_ef10_stat_mask(efx);
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u64 *stats = nic_data->stats;
	size_t stats_count = 0, index;
	int retry;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us)
	 */
	for (retry = 0; retry < 100; ++retry) {
		if (efx_ef10_try_update_nic_stats(efx) == 0)
			break;
		udelay(100);
	}

	if (full_stats) {
		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
			if (efx_ef10_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (core_stats) {
		core_stats->rx_packets = stats[EF10_STAT_rx_packets];
		core_stats->tx_packets = stats[EF10_STAT_tx_packets];
		core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
		core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
		core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
		core_stats->multicast = stats[EF10_STAT_rx_multicast];
		core_stats->rx_length_errors =
			stats[EF10_STAT_rx_gtjumbo] +
			stats[EF10_STAT_rx_length_error];
		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
		core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
		core_stats->rx_errors = (core_stats->rx_length_errors +
					 core_stats->rx_crc_errors +
					 core_stats->rx_frame_errors);
	}

	return stats_count;
}

static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned int mode, value;
	efx_dword_t timer_cmd;

	if (channel->irq_moderation) {
		mode = 3;
		value = channel->irq_moderation - 1;
	} else {
		mode = 0;
		value = 0;
	}

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
				     EFE_DD_EVQ_IND_TIMER_FLAGS,
				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
				     ERF_DD_EVQ_IND_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
				     ERF_DZ_TC_TIMER_VAL, value);
		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
				channel->channel);
	}
}

static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
{
	if (type != 0)
		return -EINVAL;
	return 0;
}

static void efx_ef10_mcdi_request(struct efx_nic *efx,
				  const efx_dword_t *hdr, size_t hdr_len,
				  const efx_dword_t *sdu, size_t sdu_len)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware.  However the dwords are swapped by firmware.  The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
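	/* (Worked example with an assumed buffer address: for a DMA address
	 * of 0x0000001234567800, the 'low' register is written with
	 * 0x00000012, the high dword, and the 'high' doorbell register with
	 * 0x34567800, the low dword.  The doorbell's bottom bits are 0
	 * because the buffer is at least 8-byte aligned, which is what lets
	 * the probe path use a doorbell write of 1 as an out-of-band
	 * "cancel" message.)
	 */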
	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
		    ER_DZ_MC_DB_LWRD);
	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
		    ER_DZ_MC_DB_HWRD);
}

static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void
efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
			    size_t offset, size_t outlen)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	const u8 *pdu = nic_data->mcdi_buf.addr;

	memcpy(outbuf, pdu + offset, outlen);
}

static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_ef10_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting.  However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;

	/* All our allocations have been reset */
	nic_data->must_realloc_vis = true;
	nic_data->must_restore_filters = true;
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;

	/* The datapath firmware might have been changed */
	nic_data->must_check_datapath_caps = true;

	/* MAC statistics have been cleared on the NIC; clear the local
	 * statistic that we update with efx_update_diff_stat().
	 */
	nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;

	return -EIO;
}

/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
{
	struct efx_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	struct efx_channel *channel;
	efx_dword_t reg;
	u32 queues;

	/* Read the ISR which also ACKs the interrupts */
	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);

	if (queues == 0)
		return IRQ_NONE;

	if (likely(soft_enabled)) {
		/* Note test interrupts */
		if (queues & (1U << efx->irq_level))
			efx->last_irq_cpu = raw_smp_processor_id();

		efx_for_each_channel(channel, efx) {
			if (queues & 1)
				efx_schedule_channel_irq(channel);
			queues >>= 1;
		}
	}

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));

	return IRQ_HANDLED;
}

static void efx_ef10_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}

static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
				    (tx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

/* This writes to the TX_DESC_WPTR and also pushes data */
static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
					 const efx_qword_t *txd)
{
	unsigned int write_ptr;
	efx_oword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD, tx_queue->queue);
}

static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	efx_qword_t *txd;
	int rc;
	int i;

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* A previous user of this TX queue might have set us up the
	 * bomb by writing a descriptor to the TX push collector but
	 * not the doorbell.  (Each collector belongs to a port, not a
	 * queue or function, so cannot easily be reset.)  We must
	 * attempt to push a no-op descriptor in its place.
	 */
	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
	tx_queue->insert_count = 1;
	txd = efx_tx_desc(tx_queue, 0);
	EFX_POPULATE_QWORD_4(*txd,
			     ESF_DZ_TX_DESC_IS_OPT, true,
			     ESF_DZ_TX_OPTION_TYPE,
			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
	tx_queue->write_count = 1;
	wmb();
	efx_ef10_push_tx_desc(tx_queue, txd);

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
	struct efx_nic *efx = tx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
		       tx_queue->queue);

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned int write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
}

static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
{
	unsigned int old_write_count = tx_queue->write_count;
	struct efx_tx_buffer *buffer;
	unsigned int write_ptr;
	efx_qword_t *txd;

	BUG_ON(tx_queue->write_count == tx_queue->insert_count);

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		/* Create TX descriptor ring entry */
		if (buffer->flags & EFX_TX_BUF_OPTION) {
			*txd = buffer->option;
		} else {
			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
			EFX_POPULATE_QWORD_3(
				*txd,
				ESF_DZ_TX_KER_CONT,
				buffer->flags & EFX_TX_BUF_CONT,
				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
		}
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

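	/* A "push" (below) writes the first new descriptor and the write
	 * pointer to the NIC in a single MMIO operation, so the NIC need
	 * not fetch that descriptor by DMA; otherwise we only notify the
	 * new write pointer.  The heuristic for when pushing is worthwhile
	 * lives in efx_nic_may_push_tx_desc().
	 */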
	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_ef10_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_ef10_notify_tx_desc(tx_queue);
	}
}

static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
		       EVB_PORT_ID_ASSIGNED);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
		       EFX_MAX_CHANNELS);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
		outbuf, sizeof(outbuf), &outlen);
	if (rc != 0)
		return rc;

	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
		return -EIO;

	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);

	return 0;
}

static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
	int rc;

	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
		       context);

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
	WARN_ON(rc != 0);
}

static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
{
	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
	int i, rc;

	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);

	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
		MCDI_PTR(tablebuf,
			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
				(u8) efx->rx_indir_table[i];

	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
			  sizeof(tablebuf), NULL, 0, NULL);
	if (rc != 0)
		return rc;

	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
		       context);
	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
	for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
			efx->rx_hash_key[i];

	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
			    sizeof(keybuf), NULL, 0, NULL);
}

static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;

	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
		efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
}

static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");

	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
		rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
		if (rc != 0)
			goto fail;
	}

	rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
	if (rc != 0)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = rx_queue->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
	struct efx_nic *efx = rx_queue->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}

/* This creates an entry in the RX descriptor queue */
static inline void
efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_2(*rxd,
			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int write_count;
	efx_dword_t reg;

	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
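	/* (e.g. an added_count of 13 is rounded down to a write_count of 8;
	 * the remaining 5 descriptors are written on a later call)
	 */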
	write_count = rx_queue->added_count & ~7;
	if (rx_queue->notified_count == write_count)
		return;

	do
		efx_ef10_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
	while (++rx_queue->notified_count != write_count);

	wmb();
	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
			     write_count & rx_queue->ptr_mask);
	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
			efx_rx_queue_index(rx_queue));
}

static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;

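/* Ask the MC to post a driver-generated event (EFX_EF10_REFILL) on this
 * queue's event queue.  The actual refill then runs in event-processing
 * context when the event comes back (see
 * efx_ef10_handle_driver_generated_event()), rather than in the caller's
 * context.
 */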
static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
			   inbuf, sizeof(inbuf), 0,
			   efx_ef10_rx_defer_refill_complete, 0);
}

static void
efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
				  int rc, efx_dword_t *outbuf,
				  size_t outlen_actual)
{
	/* nothing to do */
}

static int efx_ef10_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static int efx_ef10_ev_init(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	struct efx_ef10_nic_data *nic_data;
	bool supports_rx_merge;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc;
	int i;

	nic_data = efx->nic_data;
	supports_rx_merge =
		!!(nic_data->datapath_caps &
		   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
			      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
			      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
			      INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		goto fail;

	/* IRQ return is ignored */

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
	return rc;
}

static void efx_ef10_ev_fini(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
	struct efx_nic *efx = channel->efx;
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);

	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);

	if (rc && rc != -EALREADY)
		goto fail;

	return;

fail:
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

static void efx_ef10_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}

static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
					   unsigned int rx_queue_label)
{
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "rx event arrived on queue %d labeled as queue %u\n",
		   efx_rx_queue_index(rx_queue), rx_queue_label);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

static void
efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
			     unsigned int actual, unsigned int expected)
{
	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
	struct efx_nic *efx = rx_queue->efx;

	netif_info(efx, hw, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, actual, expected);

	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}

/* A partially received scattered packet was aborted; clean up the
 * queue's scatter state.
 */
static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
{
	unsigned int rx_desc_ptr;

	WARN_ON(rx_queue->scatter_n == 0);

	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
		  "scattered RX aborted (dropping %u buffers)\n",
		  rx_queue->scatter_n);

	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;

	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
		      0, EFX_RX_PKT_DISCARD);

	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
}

static int efx_ef10_handle_rx_event(struct efx_channel *channel,
				    const efx_qword_t *event)
{
	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
	unsigned int n_descs, n_packets, i;
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue;
	bool rx_cont;
	u16 flags = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	/* Basic packet information */
	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);

	WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);

	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
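	/* (The event carries only the low bits of the descriptor pointer,
	 * so the subtraction is taken modulo
	 * 2^ESF_DZ_RX_DSC_PTR_LBITS_WIDTH.  As an illustration, assuming a
	 * 4-bit field: next_ptr_lbits == 2 with removed_count == 14 gives
	 * n_descs = (2 - 14) & 15 = 4.)
	 */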

	if (n_descs != rx_queue->scatter_n + 1) {
		/* detect rx abort */
		if (unlikely(n_descs == rx_queue->scatter_n)) {
			WARN_ON(rx_bytes != 0);
			efx_ef10_handle_rx_abort(rx_queue);
			return 0;
		}

		if (unlikely(rx_queue->scatter_n != 0)) {
			/* Scattered packet completions cannot be
			 * merged, so something has gone wrong.
			 */
			efx_ef10_handle_rx_bad_lbits(
				rx_queue, next_ptr_lbits,
				(rx_queue->removed_count +
				 rx_queue->scatter_n + 1) &
				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
			return 0;
		}

		/* Merged completion for multiple non-scattered packets */
		rx_queue->scatter_n = 1;
		rx_queue->scatter_len = 0;
		n_packets = n_descs;
		++channel->n_rx_merge_events;
		channel->n_rx_merge_packets += n_packets;
		flags |= EFX_RX_PKT_PREFIX_LEN;
	} else {
		++rx_queue->scatter_n;
		rx_queue->scatter_len += rx_bytes;
		if (rx_cont)
			return 0;
		n_packets = 1;
	}

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
		flags |= EFX_RX_PKT_DISCARD;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
		channel->n_rx_ip_hdr_chksum_err += n_packets;
	} else if (unlikely(EFX_QWORD_FIELD(*event,
					    ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
		channel->n_rx_tcp_udp_chksum_err += n_packets;
	} else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
		   rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
		flags |= EFX_RX_PKT_CSUMMED;
	}

	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
		flags |= EFX_RX_PKT_TCP;

	channel->irq_mod_score += 2 * n_packets;

	/* Handle received packet(s) */
	for (i = 0; i < n_packets; i++) {
		efx_rx_packet(rx_queue,
			      rx_queue->removed_count & rx_queue->ptr_mask,
			      rx_queue->scatter_n, rx_queue->scatter_len,
			      flags);
		rx_queue->removed_count += rx_queue->scatter_n;
	}

	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;

	return n_packets;
}

static int
efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_tx_queue *tx_queue;
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	int tx_descs = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
		return 0;

	/* Transmit completion */
	tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
	tx_queue = efx_channel_get_tx_queue(channel,
					    tx_ev_q_label % EFX_TXQ_TYPES);
	tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
		    tx_queue->ptr_mask);
	efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);

	return tx_descs;
}

static void
efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int subcode;

	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);

	switch (subcode) {
	case ESE_DZ_DRV_TIMER_EV:
	case ESE_DZ_DRV_WAKE_UP_EV:
		break;
	case ESE_DZ_DRV_START_UP_EV:
		/* Event queue initialisation complete; nothing to do */
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %d"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, subcode,
			  EFX_QWORD_VAL(*event));
	}
}

static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
						   efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	u32 subcode;

	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);

	switch (subcode) {
	case EFX_EF10_TEST:
		channel->event_test_cpu = raw_smp_processor_id();
		break;
	case EFX_EF10_REFILL:
		/* The queue must be empty, so we won't receive any rx
		 * events, and therefore efx_process_channel() won't
		 * refill the queue.  Refill it here.
		 */
		efx_fast_push_rx_descriptors(&channel->rx_queue);
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "channel %d unknown driver event type %u"
			  " (data " EFX_QWORD_FMT ")\n",
			  channel->channel, (unsigned) subcode,
			  EFX_QWORD_VAL(*event));
	}
}

static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
{
	struct efx_nic *efx = channel->efx;
	efx_qword_t event, *p_event;
	unsigned int read_ptr;
	int ev_code;
	int tx_descs = 0;
	int spent = 0;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			break;

		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);

		netif_vdbg(efx, drv, efx->net_dev,
			   "processing event on %d " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(event));

		switch (ev_code) {
		case ESE_DZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case ESE_DZ_EV_CODE_RX_EV:
			spent += efx_ef10_handle_rx_event(channel, &event);
			if (spent >= quota) {
				/* XXX can we split a merged event to
				 * avoid going over-quota?
				 */
				spent = quota;
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_TX_EV:
			tx_descs += efx_ef10_handle_tx_event(channel, &event);
			if (tx_descs > efx->txq_entries) {
				spent = quota;
				goto out;
			} else if (++spent == quota) {
				goto out;
			}
			break;
		case ESE_DZ_EV_CODE_DRIVER_EV:
			efx_ef10_handle_driver_event(channel, &event);
			if (++spent == quota)
				goto out;
			break;
		case EFX_EF10_DRVGEN_EV:
			efx_ef10_handle_driver_generated_event(channel, &event);
			break;
		default:
			netif_err(efx, hw, efx->net_dev,
				  "channel %d unknown event type %d"
				  " (data " EFX_QWORD_FMT ")\n",
				  channel->channel, ev_code,
				  EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

static void efx_ef10_ev_read_ack(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	efx_dword_t rptr;

	if (EFX_EF10_WORKAROUND_35388(efx)) {
		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

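		/* Under this workaround the read pointer is written in two
		 * halves through the indirect register: first the high
		 * bits, then the low bits.  As an illustration, assuming a
		 * 6-bit ERF_DD_EVQ_IND_RPTR_WIDTH and a masked read pointer
		 * of 0x9c1, the first write carries 0x9c1 >> 6 = 0x27 and
		 * the second carries 0x9c1 & 0x3f = 0x01.
		 */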
		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
				     ERF_DD_EVQ_IND_RPTR,
				     (channel->eventq_read_ptr &
				      channel->eventq_mask) >>
				     ERF_DD_EVQ_IND_RPTR_WIDTH);
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
				     ERF_DD_EVQ_IND_RPTR,
				     channel->eventq_read_ptr &
				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
				channel->channel);
	} else {
		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
				     channel->eventq_read_ptr &
				     channel->eventq_mask);
		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
	}
}

static void efx_ef10_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
			     ESF_DZ_EV_DATA, EFX_EF10_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc != 0)
		goto fail;

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}
1698 
1699 void efx_ef10_handle_drain_event(struct efx_nic *efx)
1700 {
1701 	if (atomic_dec_and_test(&efx->active_queues))
1702 		wake_up(&efx->flush_wq);
1703 
1704 	WARN_ON(atomic_read(&efx->active_queues) < 0);
1705 }
1706 
1707 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
1708 {
1709 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1710 	struct efx_channel *channel;
1711 	struct efx_tx_queue *tx_queue;
1712 	struct efx_rx_queue *rx_queue;
1713 	int pending;
1714 
1715 	/* If the MC has just rebooted, the TX/RX queues will have already been
1716 	 * torn down, but efx->active_queues needs to be set to zero.
1717 	 */
1718 	if (nic_data->must_realloc_vis) {
1719 		atomic_set(&efx->active_queues, 0);
1720 		return 0;
1721 	}
1722 
1723 	/* Do not attempt to write to the NIC during EEH recovery */
1724 	if (efx->state != STATE_RECOVERY) {
1725 		efx_for_each_channel(channel, efx) {
1726 			efx_for_each_channel_rx_queue(rx_queue, channel)
1727 				efx_ef10_rx_fini(rx_queue);
1728 			efx_for_each_channel_tx_queue(tx_queue, channel)
1729 				efx_ef10_tx_fini(tx_queue);
1730 		}
1731 
1732 		wait_event_timeout(efx->flush_wq,
1733 				   atomic_read(&efx->active_queues) == 0,
1734 				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
1735 		pending = atomic_read(&efx->active_queues);
1736 		if (pending) {
1737 			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
1738 				  pending);
1739 			return -ETIMEDOUT;
1740 		}
1741 	}
1742 
1743 	return 0;
1744 }
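
/* Illustration (not part of the driver): the flush handshake above is
 * a simple counting pattern.  Each live queue holds a reference in
 * efx->active_queues, every drain event drops one reference via
 * efx_ef10_handle_drain_event(), and the waiter sleeps until the count
 * reaches zero or the timeout expires:
 *
 *	if (atomic_dec_and_test(&count))	// per drain event
 *		wake_up(&wq);
 *	...
 *	wait_event_timeout(wq, atomic_read(&count) == 0, timeout);
 *
 * A non-zero count after the wait means some queues never drained,
 * hence the -ETIMEDOUT above.
 */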
1745 
1746 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
1747 				  const struct efx_filter_spec *right)
1748 {
1749 	if ((left->match_flags ^ right->match_flags) |
1750 	    ((left->flags ^ right->flags) &
1751 	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
1752 		return false;
1753 
1754 	return memcmp(&left->outer_vid, &right->outer_vid,
1755 		      sizeof(struct efx_filter_spec) -
1756 		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
1757 }
1758 
1759 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
1760 {
1761 	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
1762 	return jhash2((const u32 *)&spec->outer_vid,
1763 		      (sizeof(struct efx_filter_spec) -
1764 		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
1765 		      0);
1766 	/* XXX should we randomise the initval? */
1767 }
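
/* Illustration (not part of the driver): efx_ef10_filter_equal() and
 * efx_ef10_filter_hash() both treat everything from ->outer_vid to the
 * end of struct efx_filter_spec as one opaque byte range, so they stay
 * consistent by construction.  The general pattern, for a hypothetical
 * struct with the match fields grouped at the tail:
 *
 *	#define TAIL_OFF offsetof(struct spec, first_match_field)
 *	#define TAIL_LEN (sizeof(struct spec) - TAIL_OFF)
 *
 *	equal = !memcmp((u8 *)l + TAIL_OFF, (u8 *)r + TAIL_OFF, TAIL_LEN);
 *	hash  = jhash2((u32 *)((u8 *)s + TAIL_OFF), TAIL_LEN / 4, 0);
 *
 * The BUILD_BUG_ON() above enforces the jhash2() requirement that the
 * hashed range start on a 32-bit boundary.
 */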
1768 
1769 /* Decide whether a filter should be exclusive or else should allow
1770  * delivery to additional recipients.  Currently we decide that
1771  * filters for specific local unicast MAC and IP addresses are
1772  * exclusive.
1773  */
1774 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
1775 {
1776 	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
1777 	    !is_multicast_ether_addr(spec->loc_mac))
1778 		return true;
1779 
1780 	if ((spec->match_flags &
1781 	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
1782 	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
1783 		if (spec->ether_type == htons(ETH_P_IP) &&
1784 		    !ipv4_is_multicast(spec->loc_host[0]))
1785 			return true;
1786 		if (spec->ether_type == htons(ETH_P_IPV6) &&
1787 		    ((const u8 *)spec->loc_host)[0] != 0xff)
1788 			return true;
1789 	}
1790 
1791 	return false;
1792 }
1793 
1794 static struct efx_filter_spec *
1795 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
1796 			   unsigned int filter_idx)
1797 {
1798 	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
1799 					  ~EFX_EF10_FILTER_FLAGS);
1800 }
1801 
1802 static unsigned int
1803 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
1804 			   unsigned int filter_idx)
1805 {
1806 	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
1807 }
1808 
1809 static void
1810 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
1811 			  unsigned int filter_idx,
1812 			  const struct efx_filter_spec *spec,
1813 			  unsigned int flags)
1814 {
1815 	table->entry[filter_idx].spec = (unsigned long)spec | flags;
1816 }
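
/* Illustration (not part of the driver): ->spec is a tagged pointer.
 * A kmalloc()ed struct efx_filter_spec is at least 4-byte aligned, so
 * the two low bits of its address are always zero and can carry the
 * BUSY and STACK_OLD flags instead:
 *
 *	unsigned long word = (unsigned long)ptr | flags;	// pack
 *	spec  = (void *)(word & ~EFX_EF10_FILTER_FLAGS);	// unpack ptr
 *	flags = word & EFX_EF10_FILTER_FLAGS;			// unpack flags
 *
 * which is exactly what the three helpers above implement.
 */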
1817 
1818 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
1819 				      const struct efx_filter_spec *spec,
1820 				      efx_dword_t *inbuf, u64 handle,
1821 				      bool replacing)
1822 {
1823 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1824 
1825 	memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
1826 
1827 	if (replacing) {
1828 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1829 			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
1830 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
1831 	} else {
1832 		u32 match_fields = 0;
1833 
1834 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1835 			       efx_ef10_filter_is_exclusive(spec) ?
1836 			       MC_CMD_FILTER_OP_IN_OP_INSERT :
1837 			       MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
1838 
1839 		/* Convert match flags and values.  Unlike almost
1840 		 * everything else in MCDI, these fields are in
1841 		 * network byte order.
1842 		 */
1843 		if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
1844 			match_fields |=
1845 				is_multicast_ether_addr(spec->loc_mac) ?
1846 				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
1847 				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
1848 #define COPY_FIELD(gen_flag, gen_field, mcdi_field)			     \
1849 		if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
1850 			match_fields |=					     \
1851 				1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	     \
1852 				mcdi_field ## _LBN;			     \
1853 			BUILD_BUG_ON(					     \
1854 				MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
1855 				sizeof(spec->gen_field));		     \
1856 			memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ##	mcdi_field), \
1857 			       &spec->gen_field, sizeof(spec->gen_field));   \
1858 		}
1859 		COPY_FIELD(REM_HOST, rem_host, SRC_IP);
1860 		COPY_FIELD(LOC_HOST, loc_host, DST_IP);
1861 		COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
1862 		COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
1863 		COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
1864 		COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
1865 		COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
1866 		COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
1867 		COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
1868 		COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
1869 #undef COPY_FIELD
1870 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
1871 			       match_fields);
1872 	}
1873 
1874 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1875 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
1876 		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
1877 		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
1878 		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
1879 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
1880 		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
1881 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
1882 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
1883 		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
1884 		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
1885 		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
1886 	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
1887 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
1888 			       spec->rss_context !=
1889 			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
1890 			       spec->rss_context : nic_data->rx_rss_context);
1891 }
1892 
1893 static int efx_ef10_filter_push(struct efx_nic *efx,
1894 				const struct efx_filter_spec *spec,
1895 				u64 *handle, bool replacing)
1896 {
1897 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
1898 	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
1899 	int rc;
1900 
1901 	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
1902 	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
1903 			  outbuf, sizeof(outbuf), NULL);
1904 	if (rc == 0)
1905 		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
1906 	return rc;
1907 }
1908 
1909 static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
1910 					enum efx_filter_match_flags match_flags)
1911 {
1912 	unsigned int match_pri;
1913 
1914 	for (match_pri = 0;
1915 	     match_pri < table->rx_match_count;
1916 	     match_pri++)
1917 		if (table->rx_match_flags[match_pri] == match_flags)
1918 			return match_pri;
1919 
1920 	return -EPROTONOSUPPORT;
1921 }
1922 
1923 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
1924 				  struct efx_filter_spec *spec,
1925 				  bool replace_equal)
1926 {
1927 	struct efx_ef10_filter_table *table = efx->filter_state;
1928 	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
1929 	struct efx_filter_spec *saved_spec;
1930 	unsigned int match_pri, hash;
1931 	unsigned int priv_flags;
1932 	bool replacing = false;
1933 	int ins_index = -1;
1934 	DEFINE_WAIT(wait);
1935 	bool is_mc_recip;
1936 	s32 rc;
1937 
1938 	/* For now, only support RX filters */
1939 	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
1940 	    EFX_FILTER_FLAG_RX)
1941 		return -EINVAL;
1942 
1943 	rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
1944 	if (rc < 0)
1945 		return rc;
1946 	match_pri = rc;
1947 
1948 	hash = efx_ef10_filter_hash(spec);
1949 	is_mc_recip = efx_filter_is_mc_recipient(spec);
1950 	if (is_mc_recip)
1951 		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
1952 
1953 	/* Find any existing filters with the same match tuple or
1954 	 * else a free slot to insert at.  If any of them are busy,
1955 	 * we have to wait and retry.
1956 	 */
1957 	for (;;) {
1958 		unsigned int depth = 1;
1959 		unsigned int i;
1960 
1961 		spin_lock_bh(&efx->filter_lock);
1962 
1963 		for (;;) {
1964 			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
1965 			saved_spec = efx_ef10_filter_entry_spec(table, i);
1966 
1967 			if (!saved_spec) {
1968 				if (ins_index < 0)
1969 					ins_index = i;
1970 			} else if (efx_ef10_filter_equal(spec, saved_spec)) {
1971 				if (table->entry[i].spec &
1972 				    EFX_EF10_FILTER_FLAG_BUSY)
1973 					break;
1974 				if (spec->priority < saved_spec->priority &&
1975 				    !(saved_spec->priority ==
1976 				      EFX_FILTER_PRI_REQUIRED &&
1977 				      saved_spec->flags &
1978 				      EFX_FILTER_FLAG_RX_STACK)) {
1979 					rc = -EPERM;
1980 					goto out_unlock;
1981 				}
1982 				if (!is_mc_recip) {
1983 					/* This is the only one */
1984 					if (spec->priority ==
1985 					    saved_spec->priority &&
1986 					    !replace_equal) {
1987 						rc = -EEXIST;
1988 						goto out_unlock;
1989 					}
1990 					ins_index = i;
1991 					goto found;
1992 				} else if (spec->priority >
1993 					   saved_spec->priority ||
1994 					   (spec->priority ==
1995 					    saved_spec->priority &&
1996 					    replace_equal)) {
1997 					if (ins_index < 0)
1998 						ins_index = i;
1999 					else
2000 						__set_bit(depth, mc_rem_map);
2001 				}
2002 			}
2003 
2004 			/* Once we reach the maximum search depth, use
2005 			 * the first suitable slot or return -EBUSY if
2006 			 * there was none
2007 			 */
2008 			if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2009 				if (ins_index < 0) {
2010 					rc = -EBUSY;
2011 					goto out_unlock;
2012 				}
2013 				goto found;
2014 			}
2015 
2016 			++depth;
2017 		}
2018 
2019 		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2020 		spin_unlock_bh(&efx->filter_lock);
2021 		schedule();
2022 	}
2023 
2024 found:
2025 	/* Create a software table entry if necessary, and mark it
2026 	 * busy.  We might yet fail to insert, but any attempt to
2027 	 * insert a conflicting filter while we're waiting for the
2028 	 * firmware must find the busy entry.
2029 	 */
2030 	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2031 	if (saved_spec) {
2032 		if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
2033 			/* Just make sure it won't be removed */
2034 			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
2035 			table->entry[ins_index].spec &=
2036 				~EFX_EF10_FILTER_FLAG_STACK_OLD;
2037 			rc = ins_index;
2038 			goto out_unlock;
2039 		}
2040 		replacing = true;
2041 		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
2042 	} else {
2043 		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2044 		if (!saved_spec) {
2045 			rc = -ENOMEM;
2046 			goto out_unlock;
2047 		}
2048 		*saved_spec = *spec;
2049 		priv_flags = 0;
2050 	}
2051 	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2052 				  priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
2053 
2054 	/* Mark lower-priority multicast recipients busy prior to removal */
2055 	if (is_mc_recip) {
2056 		unsigned int depth, i;
2057 
2058 		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2059 			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2060 			if (test_bit(depth, mc_rem_map))
2061 				table->entry[i].spec |=
2062 					EFX_EF10_FILTER_FLAG_BUSY;
2063 		}
2064 	}
2065 
2066 	spin_unlock_bh(&efx->filter_lock);
2067 
2068 	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
2069 				  replacing);
2070 
2071 	/* Finalise the software table entry */
2072 	spin_lock_bh(&efx->filter_lock);
2073 	if (rc == 0) {
2074 		if (replacing) {
2075 			/* Update the fields that may differ */
2076 			saved_spec->priority = spec->priority;
2077 			saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
2078 			saved_spec->flags |= spec->flags;
2079 			saved_spec->rss_context = spec->rss_context;
2080 			saved_spec->dmaq_id = spec->dmaq_id;
2081 		}
2082 	} else if (!replacing) {
2083 		kfree(saved_spec);
2084 		saved_spec = NULL;
2085 	}
2086 	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
2087 
2088 	/* Remove and finalise entries for lower-priority multicast
2089 	 * recipients
2090 	 */
2091 	if (is_mc_recip) {
2092 		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2093 		unsigned int depth, i;
2094 
2095 		memset(inbuf, 0, sizeof(inbuf));
2096 
2097 		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2098 			if (!test_bit(depth, mc_rem_map))
2099 				continue;
2100 
2101 			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2102 			saved_spec = efx_ef10_filter_entry_spec(table, i);
2103 			priv_flags = efx_ef10_filter_entry_flags(table, i);
2104 
2105 			if (rc == 0) {
2106 				spin_unlock_bh(&efx->filter_lock);
2107 				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2108 					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2109 				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2110 					       table->entry[i].handle);
2111 				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2112 						  inbuf, sizeof(inbuf),
2113 						  NULL, 0, NULL);
2114 				spin_lock_bh(&efx->filter_lock);
2115 			}
2116 
2117 			if (rc == 0) {
2118 				kfree(saved_spec);
2119 				saved_spec = NULL;
2120 				priv_flags = 0;
2121 			} else {
2122 				priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
2123 			}
2124 			efx_ef10_filter_set_entry(table, i, saved_spec,
2125 						  priv_flags);
2126 		}
2127 	}
2128 
2129 	/* If successful, return the inserted filter ID */
2130 	if (rc == 0)
2131 		rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
2132 
2133 	wake_up_all(&table->waitq);
2134 out_unlock:
2135 	spin_unlock_bh(&efx->filter_lock);
2136 	finish_wait(&table->waitq, &wait);
2137 	return rc;
2138 }
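
/* Illustration (not part of the driver): the filter ID returned on
 * success packs two values into one number, the match priority as the
 * high "digit" and the table row as the low one:
 *
 *	u32 id  = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;	// encode
 *	u32 row = id % HUNT_FILTER_TBL_ROWS;			// decode row
 *	u32 pri = id / HUNT_FILTER_TBL_ROWS;			// decode pri
 *
 * The remove/get paths below recover the row with % and cross-check the
 * priority part against efx_ef10_filter_rx_match_pri().
 */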
2139 
2140 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
2141 {
2142 	/* no need to do anything here on EF10 */
2143 }
2144 
2145 /* Remove a filter.
2146  * If !stack_requested, remove by ID
2147  * If stack_requested, remove by index
2148  * Filter ID may come from userland and must be range-checked.
2149  */
2150 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
2151 					   enum efx_filter_priority priority,
2152 					   u32 filter_id, bool stack_requested)
2153 {
2154 	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2155 	struct efx_ef10_filter_table *table = efx->filter_state;
2156 	MCDI_DECLARE_BUF(inbuf,
2157 			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2158 			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2159 	struct efx_filter_spec *spec;
2160 	DEFINE_WAIT(wait);
2161 	int rc;
2162 
2163 	/* Find the software table entry and mark it busy.  Don't
2164 	 * remove it yet; any attempt to update while we're waiting
2165 	 * for the firmware must find the busy entry.
2166 	 */
2167 	for (;;) {
2168 		spin_lock_bh(&efx->filter_lock);
2169 		if (!(table->entry[filter_idx].spec &
2170 		      EFX_EF10_FILTER_FLAG_BUSY))
2171 			break;
2172 		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2173 		spin_unlock_bh(&efx->filter_lock);
2174 		schedule();
2175 	}
2176 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
2177 	if (!spec || spec->priority > priority ||
2178 	    (!stack_requested &&
2179 	     efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
2180 	     filter_id / HUNT_FILTER_TBL_ROWS)) {
2181 		rc = -ENOENT;
2182 		goto out_unlock;
2183 	}
2184 	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2185 	spin_unlock_bh(&efx->filter_lock);
2186 
2187 	if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
2188 		/* Reset steering of a stack-owned filter */
2189 
2190 		struct efx_filter_spec new_spec = *spec;
2191 
2192 		new_spec.priority = EFX_FILTER_PRI_REQUIRED;
2193 		new_spec.flags = (EFX_FILTER_FLAG_RX |
2194 				  EFX_FILTER_FLAG_RX_RSS |
2195 				  EFX_FILTER_FLAG_RX_STACK);
2196 		new_spec.dmaq_id = 0;
2197 		new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
2198 		rc = efx_ef10_filter_push(efx, &new_spec,
2199 					  &table->entry[filter_idx].handle,
2200 					  true);
2201 
2202 		spin_lock_bh(&efx->filter_lock);
2203 		if (rc == 0)
2204 			*spec = new_spec;
2205 	} else {
2206 		/* Really remove the filter */
2207 
2208 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2209 			       efx_ef10_filter_is_exclusive(spec) ?
2210 			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
2211 			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2212 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2213 			       table->entry[filter_idx].handle);
2214 		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2215 				  inbuf, sizeof(inbuf), NULL, 0, NULL);
2216 
2217 		spin_lock_bh(&efx->filter_lock);
2218 		if (rc == 0) {
2219 			kfree(spec);
2220 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2221 		}
2222 	}
2223 	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2224 	wake_up_all(&table->waitq);
2225 out_unlock:
2226 	spin_unlock_bh(&efx->filter_lock);
2227 	finish_wait(&table->waitq, &wait);
2228 	return rc;
2229 }
2230 
2231 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
2232 				       enum efx_filter_priority priority,
2233 				       u32 filter_id)
2234 {
2235 	return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
2236 }
2237 
2238 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
2239 				    enum efx_filter_priority priority,
2240 				    u32 filter_id, struct efx_filter_spec *spec)
2241 {
2242 	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2243 	struct efx_ef10_filter_table *table = efx->filter_state;
2244 	const struct efx_filter_spec *saved_spec;
2245 	int rc;
2246 
2247 	spin_lock_bh(&efx->filter_lock);
2248 	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
2249 	if (saved_spec && saved_spec->priority == priority &&
2250 	    efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
2251 	    filter_id / HUNT_FILTER_TBL_ROWS) {
2252 		*spec = *saved_spec;
2253 		rc = 0;
2254 	} else {
2255 		rc = -ENOENT;
2256 	}
2257 	spin_unlock_bh(&efx->filter_lock);
2258 	return rc;
2259 }
2260 
2261 static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
2262 				     enum efx_filter_priority priority)
2263 {
2264 	/* TODO */
2265 }
2266 
2267 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
2268 					 enum efx_filter_priority priority)
2269 {
2270 	struct efx_ef10_filter_table *table = efx->filter_state;
2271 	unsigned int filter_idx;
2272 	s32 count = 0;
2273 
2274 	spin_lock_bh(&efx->filter_lock);
2275 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2276 		if (table->entry[filter_idx].spec &&
2277 		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
2278 		    priority)
2279 			++count;
2280 	}
2281 	spin_unlock_bh(&efx->filter_lock);
2282 	return count;
2283 }
2284 
2285 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
2286 {
2287 	struct efx_ef10_filter_table *table = efx->filter_state;
2288 
2289 	return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
2290 }
2291 
2292 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
2293 				      enum efx_filter_priority priority,
2294 				      u32 *buf, u32 size)
2295 {
2296 	struct efx_ef10_filter_table *table = efx->filter_state;
2297 	struct efx_filter_spec *spec;
2298 	unsigned int filter_idx;
2299 	s32 count = 0;
2300 
2301 	spin_lock_bh(&efx->filter_lock);
2302 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2303 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
2304 		if (spec && spec->priority == priority) {
2305 			if (count == size) {
2306 				count = -EMSGSIZE;
2307 				break;
2308 			}
2309 			buf[count++] = (efx_ef10_filter_rx_match_pri(
2310 						table, spec->match_flags) *
2311 					HUNT_FILTER_TBL_ROWS +
2312 					filter_idx);
2313 		}
2314 	}
2315 	spin_unlock_bh(&efx->filter_lock);
2316 	return count;
2317 }
2318 
2319 #ifdef CONFIG_RFS_ACCEL
2320 
2321 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
2322 
2323 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
2324 				      struct efx_filter_spec *spec)
2325 {
2326 	struct efx_ef10_filter_table *table = efx->filter_state;
2327 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2328 	struct efx_filter_spec *saved_spec;
2329 	unsigned int hash, i, depth = 1;
2330 	bool replacing = false;
2331 	int ins_index = -1;
2332 	u64 cookie;
2333 	s32 rc;
2334 
2335 	/* Must be an RX filter without RSS and not for a multicast
2336 	 * destination address (RFS only works for connected sockets).
2337 	 * These restrictions allow us to pass only a tiny amount of
2338 	 * data through to the completion function.
2339 	 */
2340 	EFX_WARN_ON_PARANOID(spec->flags !=
2341 			     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
2342 	EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
2343 	EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
2344 
2345 	hash = efx_ef10_filter_hash(spec);
2346 
2347 	spin_lock_bh(&efx->filter_lock);
2348 
2349 	/* Find any existing filter with the same match tuple or else
2350 	 * a free slot to insert at.  If an existing filter is busy,
2351 	 * we have to give up.
2352 	 */
2353 	for (;;) {
2354 		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2355 		saved_spec = efx_ef10_filter_entry_spec(table, i);
2356 
2357 		if (!saved_spec) {
2358 			if (ins_index < 0)
2359 				ins_index = i;
2360 		} else if (efx_ef10_filter_equal(spec, saved_spec)) {
2361 			if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
2362 				rc = -EBUSY;
2363 				goto fail_unlock;
2364 			}
2365 			EFX_WARN_ON_PARANOID(saved_spec->flags &
2366 					     EFX_FILTER_FLAG_RX_STACK);
2367 			if (spec->priority < saved_spec->priority) {
2368 				rc = -EPERM;
2369 				goto fail_unlock;
2370 			}
2371 			ins_index = i;
2372 			break;
2373 		}
2374 
2375 		/* Once we reach the maximum search depth, use the
2376 		 * first suitable slot or return -EBUSY if there was
2377 		 * none
2378 		 */
2379 		if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2380 			if (ins_index < 0) {
2381 				rc = -EBUSY;
2382 				goto fail_unlock;
2383 			}
2384 			break;
2385 		}
2386 
2387 		++depth;
2388 	}
2389 
2390 	/* Create a software table entry if necessary, and mark it
2391 	 * busy.  We might yet fail to insert, but any attempt to
2392 	 * insert a conflicting filter while we're waiting for the
2393 	 * firmware must find the busy entry.
2394 	 */
2395 	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2396 	if (saved_spec) {
2397 		replacing = true;
2398 	} else {
2399 		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2400 		if (!saved_spec) {
2401 			rc = -ENOMEM;
2402 			goto fail_unlock;
2403 		}
2404 		*saved_spec = *spec;
2405 	}
2406 	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2407 				  EFX_EF10_FILTER_FLAG_BUSY);
2408 
2409 	spin_unlock_bh(&efx->filter_lock);
2410 
2411 	/* Pack up the variables needed on completion */
2412 	cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
2413 
2414 	efx_ef10_filter_push_prep(efx, spec, inbuf,
2415 				  table->entry[ins_index].handle, replacing);
2416 	efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2417 			   MC_CMD_FILTER_OP_OUT_LEN,
2418 			   efx_ef10_filter_rfs_insert_complete, cookie);
2419 
2420 	return ins_index;
2421 
2422 fail_unlock:
2423 	spin_unlock_bh(&efx->filter_lock);
2424 	return rc;
2425 }
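
/* Illustration (not part of the driver): the async MCDI completion can
 * only carry an unsigned long, so the three values needed on completion
 * are packed into one cookie above and unpacked again in
 * efx_ef10_filter_rfs_insert_complete() below:
 *
 *	cookie    = replacing << 31 | ins_index << 16 | dmaq_id;
 *	replacing = cookie >> 31;
 *	ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
 *	dmaq_id   = cookie & 0xffff;
 *
 * This works because ins_index needs only 13 bits
 * (HUNT_FILTER_TBL_ROWS is 8192) and dmaq_id fits in the low 16 bits.
 */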
2426 
2427 static void
2428 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
2429 				    int rc, efx_dword_t *outbuf,
2430 				    size_t outlen_actual)
2431 {
2432 	struct efx_ef10_filter_table *table = efx->filter_state;
2433 	unsigned int ins_index, dmaq_id;
2434 	struct efx_filter_spec *spec;
2435 	bool replacing;
2436 
2437 	/* Unpack the cookie */
2438 	replacing = cookie >> 31;
2439 	ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
2440 	dmaq_id = cookie & 0xffff;
2441 
2442 	spin_lock_bh(&efx->filter_lock);
2443 	spec = efx_ef10_filter_entry_spec(table, ins_index);
2444 	if (rc == 0) {
2445 		table->entry[ins_index].handle =
2446 			MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2447 		if (replacing)
2448 			spec->dmaq_id = dmaq_id;
2449 	} else if (!replacing) {
2450 		kfree(spec);
2451 		spec = NULL;
2452 	}
2453 	efx_ef10_filter_set_entry(table, ins_index, spec, 0);
2454 	spin_unlock_bh(&efx->filter_lock);
2455 
2456 	wake_up_all(&table->waitq);
2457 }
2458 
2459 static void
2460 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2461 				    unsigned long filter_idx,
2462 				    int rc, efx_dword_t *outbuf,
2463 				    size_t outlen_actual);
2464 
2465 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2466 					   unsigned int filter_idx)
2467 {
2468 	struct efx_ef10_filter_table *table = efx->filter_state;
2469 	struct efx_filter_spec *spec =
2470 		efx_ef10_filter_entry_spec(table, filter_idx);
2471 	MCDI_DECLARE_BUF(inbuf,
2472 			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2473 			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2474 
2475 	if (!spec ||
2476 	    (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
2477 	    spec->priority != EFX_FILTER_PRI_HINT ||
2478 	    !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
2479 				 flow_id, filter_idx))
2480 		return false;
2481 
2482 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2483 		       MC_CMD_FILTER_OP_IN_OP_REMOVE);
2484 	MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2485 		       table->entry[filter_idx].handle);
2486 	if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
2487 			       efx_ef10_filter_rfs_expire_complete, filter_idx))
2488 		return false;
2489 
2490 	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2491 	return true;
2492 }
2493 
2494 static void
2495 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2496 				    unsigned long filter_idx,
2497 				    int rc, efx_dword_t *outbuf,
2498 				    size_t outlen_actual)
2499 {
2500 	struct efx_ef10_filter_table *table = efx->filter_state;
2501 	struct efx_filter_spec *spec =
2502 		efx_ef10_filter_entry_spec(table, filter_idx);
2503 
2504 	spin_lock_bh(&efx->filter_lock);
2505 	if (rc == 0) {
2506 		kfree(spec);
2507 		efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2508 	}
2509 	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2510 	wake_up_all(&table->waitq);
2511 	spin_unlock_bh(&efx->filter_lock);
2512 }
2513 
2514 #endif /* CONFIG_RFS_ACCEL */
2515 
2516 static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
2517 {
2518 	int match_flags = 0;
2519 
2520 #define MAP_FLAG(gen_flag, mcdi_field) {				\
2521 		u32 old_mcdi_flags = mcdi_flags;			\
2522 		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	\
2523 				mcdi_field ## _LBN);			\
2524 		if (mcdi_flags != old_mcdi_flags)			\
2525 			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
2526 	}
2527 	MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
2528 	MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
2529 	MAP_FLAG(REM_HOST, SRC_IP);
2530 	MAP_FLAG(LOC_HOST, DST_IP);
2531 	MAP_FLAG(REM_MAC, SRC_MAC);
2532 	MAP_FLAG(REM_PORT, SRC_PORT);
2533 	MAP_FLAG(LOC_MAC, DST_MAC);
2534 	MAP_FLAG(LOC_PORT, DST_PORT);
2535 	MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
2536 	MAP_FLAG(INNER_VID, INNER_VLAN);
2537 	MAP_FLAG(OUTER_VID, OUTER_VLAN);
2538 	MAP_FLAG(IP_PROTO, IP_PROTO);
2539 #undef MAP_FLAG
2540 
2541 	/* Did we map them all? */
2542 	if (mcdi_flags)
2543 		return -EINVAL;
2544 
2545 	return match_flags;
2546 }
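
/* Illustration (not part of the driver): MAP_FLAG() uses a
 * clear-and-compare idiom -- clear the MCDI bit and, if the word
 * changed, the bit was set, so set the corresponding generic flag.
 * Expanded for one hypothetical bit:
 *
 *	u32 old = mcdi_flags;
 *	mcdi_flags &= ~(1 << SOME_FIELD_LBN);
 *	if (mcdi_flags != old)
 *		match_flags |= EFX_FILTER_MATCH_SOMETHING;
 *
 * Because each mapped bit is cleared as it is consumed, any bit still
 * set at the end is one the driver cannot express, hence the -EINVAL.
 */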
2547 
2548 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
2549 {
2550 	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
2551 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
2552 	unsigned int pd_match_pri, pd_match_count;
2553 	struct efx_ef10_filter_table *table;
2554 	size_t outlen;
2555 	int rc;
2556 
2557 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2558 	if (!table)
2559 		return -ENOMEM;
2560 
2561 	/* Find out which RX filter types are supported, and their priorities */
2562 	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
2563 		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
2564 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
2565 			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
2566 			  &outlen);
2567 	if (rc)
2568 		goto fail;
2569 	pd_match_count = MCDI_VAR_ARRAY_LEN(
2570 		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
2571 	table->rx_match_count = 0;
2572 
2573 	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
2574 		u32 mcdi_flags =
2575 			MCDI_ARRAY_DWORD(
2576 				outbuf,
2577 				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
2578 				pd_match_pri);
2579 		rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
2580 		if (rc < 0) {
2581 			netif_dbg(efx, probe, efx->net_dev,
2582 				  "%s: fw flags %#x pri %u not supported in driver\n",
2583 				  __func__, mcdi_flags, pd_match_pri);
2584 		} else {
2585 			netif_dbg(efx, probe, efx->net_dev,
2586 				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
2587 				  __func__, mcdi_flags, pd_match_pri,
2588 				  rc, table->rx_match_count);
2589 			table->rx_match_flags[table->rx_match_count++] = rc;
2590 		}
2591 	}
2592 
2593 	table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
2594 	if (!table->entry) {
2595 		rc = -ENOMEM;
2596 		goto fail;
2597 	}
2598 
2599 	efx->filter_state = table;
2600 	init_waitqueue_head(&table->waitq);
2601 	return 0;
2602 
2603 fail:
2604 	kfree(table);
2605 	return rc;
2606 }
2607 
2608 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
2609 {
2610 	struct efx_ef10_filter_table *table = efx->filter_state;
2611 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2612 	struct efx_filter_spec *spec;
2613 	unsigned int filter_idx;
2614 	bool failed = false;
2615 	int rc;
2616 
2617 	if (!nic_data->must_restore_filters)
2618 		return;
2619 
2620 	spin_lock_bh(&efx->filter_lock);
2621 
2622 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2623 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
2624 		if (!spec)
2625 			continue;
2626 
2627 		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2628 		spin_unlock_bh(&efx->filter_lock);
2629 
2630 		rc = efx_ef10_filter_push(efx, spec,
2631 					  &table->entry[filter_idx].handle,
2632 					  false);
2633 		if (rc)
2634 			failed = true;
2635 
2636 		spin_lock_bh(&efx->filter_lock);
2637 		if (rc) {
2638 			kfree(spec);
2639 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2640 		} else {
2641 			table->entry[filter_idx].spec &=
2642 				~EFX_EF10_FILTER_FLAG_BUSY;
2643 		}
2644 	}
2645 
2646 	spin_unlock_bh(&efx->filter_lock);
2647 
2648 	if (failed)
2649 		netif_err(efx, hw, efx->net_dev,
2650 			  "unable to restore all filters\n");
2651 	else
2652 		nic_data->must_restore_filters = false;
2653 }
2654 
2655 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
2656 {
2657 	struct efx_ef10_filter_table *table = efx->filter_state;
2658 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2659 	struct efx_filter_spec *spec;
2660 	unsigned int filter_idx;
2661 	int rc;
2662 
2663 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2664 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
2665 		if (!spec)
2666 			continue;
2667 
2668 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2669 			       efx_ef10_filter_is_exclusive(spec) ?
2670 			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
2671 			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2672 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2673 			       table->entry[filter_idx].handle);
2674 		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2675 				  NULL, 0, NULL);
2676 
2677 		WARN_ON(rc != 0);
2678 		kfree(spec);
2679 	}
2680 
2681 	vfree(table->entry);
2682 	kfree(table);
2683 }
2684 
2685 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
2686 {
2687 	struct efx_ef10_filter_table *table = efx->filter_state;
2688 	struct net_device *net_dev = efx->net_dev;
2689 	struct efx_filter_spec spec;
2690 	bool remove_failed = false;
2691 	struct netdev_hw_addr *uc;
2692 	struct netdev_hw_addr *mc;
2693 	unsigned int filter_idx;
2694 	int i, n, rc;
2695 
2696 	if (!efx_dev_registered(efx))
2697 		return;
2698 
2699 	/* Mark old filters that may need to be removed */
2700 	spin_lock_bh(&efx->filter_lock);
2701 	n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
2702 	for (i = 0; i < n; i++) {
2703 		filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
2704 		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2705 	}
2706 	n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
2707 	for (i = 0; i < n; i++) {
2708 		filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
2709 		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2710 	}
2711 	spin_unlock_bh(&efx->filter_lock);
2712 
2713 	/* Copy/convert the address lists; add the primary station
2714 	 * address and broadcast address
2715 	 */
2716 	netif_addr_lock_bh(net_dev);
2717 	if (net_dev->flags & IFF_PROMISC ||
2718 	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
2719 		table->stack_uc_count = -1;
2720 	} else {
2721 		table->stack_uc_count = 1 + netdev_uc_count(net_dev);
2722 		memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
2723 		       ETH_ALEN);
2724 		i = 1;
2725 		netdev_for_each_uc_addr(uc, net_dev) {
2726 			memcpy(table->stack_uc_list[i].addr,
2727 			       uc->addr, ETH_ALEN);
2728 			i++;
2729 		}
2730 	}
2731 	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
2732 	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
2733 		table->stack_mc_count = -1;
2734 	} else {
2735 		table->stack_mc_count = 1 + netdev_mc_count(net_dev);
2736 		eth_broadcast_addr(table->stack_mc_list[0].addr);
2737 		i = 1;
2738 		netdev_for_each_mc_addr(mc, net_dev) {
2739 			memcpy(table->stack_mc_list[i].addr,
2740 			       mc->addr, ETH_ALEN);
2741 			i++;
2742 		}
2743 	}
2744 	netif_addr_unlock_bh(net_dev);
2745 
2746 	/* Insert/renew unicast filters */
2747 	if (table->stack_uc_count >= 0) {
2748 		for (i = 0; i < table->stack_uc_count; i++) {
2749 			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2750 					   EFX_FILTER_FLAG_RX_RSS |
2751 					   EFX_FILTER_FLAG_RX_STACK,
2752 					   0);
2753 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2754 						 table->stack_uc_list[i].addr);
2755 			rc = efx_ef10_filter_insert(efx, &spec, true);
2756 			if (rc < 0) {
2757 				/* Fall back to unicast-promisc */
2758 				while (i--)
2759 					efx_ef10_filter_remove_safe(
2760 						efx, EFX_FILTER_PRI_REQUIRED,
2761 						table->stack_uc_list[i].id);
2762 				table->stack_uc_count = -1;
2763 				break;
2764 			}
2765 			table->stack_uc_list[i].id = rc;
2766 		}
2767 	}
2768 	if (table->stack_uc_count < 0) {
2769 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2770 				   EFX_FILTER_FLAG_RX_RSS |
2771 				   EFX_FILTER_FLAG_RX_STACK,
2772 				   0);
2773 		efx_filter_set_uc_def(&spec);
2774 		rc = efx_ef10_filter_insert(efx, &spec, true);
2775 		if (rc < 0) {
2776 			WARN_ON(1);
2777 			table->stack_uc_count = 0;
2778 		} else {
2779 			table->stack_uc_list[0].id = rc;
2780 		}
2781 	}
2782 
2783 	/* Insert/renew multicast filters */
2784 	if (table->stack_mc_count >= 0) {
2785 		for (i = 0; i < table->stack_mc_count; i++) {
2786 			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2787 					   EFX_FILTER_FLAG_RX_RSS |
2788 					   EFX_FILTER_FLAG_RX_STACK,
2789 					   0);
2790 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2791 						 table->stack_mc_list[i].addr);
2792 			rc = efx_ef10_filter_insert(efx, &spec, true);
2793 			if (rc < 0) {
2794 				/* Fall back to multicast-promisc */
2795 				while (i--)
2796 					efx_ef10_filter_remove_safe(
2797 						efx, EFX_FILTER_PRI_REQUIRED,
2798 						table->stack_mc_list[i].id);
2799 				table->stack_mc_count = -1;
2800 				break;
2801 			}
2802 			table->stack_mc_list[i].id = rc;
2803 		}
2804 	}
2805 	if (table->stack_mc_count < 0) {
2806 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2807 				   EFX_FILTER_FLAG_RX_RSS |
2808 				   EFX_FILTER_FLAG_RX_STACK,
2809 				   0);
2810 		efx_filter_set_mc_def(&spec);
2811 		rc = efx_ef10_filter_insert(efx, &spec, true);
2812 		if (rc < 0) {
2813 			WARN_ON(1);
2814 			table->stack_mc_count = 0;
2815 		} else {
2816 			table->stack_mc_list[0].id = rc;
2817 		}
2818 	}
2819 
2820 	/* Remove filters that weren't renewed.  Since nothing else
2821 	 * changes the STACK_OLD flag or removes these filters, we
2822 	 * don't need to hold the filter_lock while scanning for
2823 	 * these filters.
2824 	 */
2825 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
2826 		if (ACCESS_ONCE(table->entry[i].spec) &
2827 		    EFX_EF10_FILTER_FLAG_STACK_OLD) {
2828 			if (efx_ef10_filter_remove_internal(efx,
2829 					EFX_FILTER_PRI_REQUIRED,
2830 					i, true) < 0)
2831 				remove_failed = true;
2832 		}
2833 	}
2834 	WARN_ON(remove_failed);
2835 }
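
/* Illustration (not part of the driver): the function above is a
 * mark-and-sweep pass.  Mark every installed stack filter STACK_OLD,
 * re-insert the filters that should now exist (renewal clears the mark
 * in efx_ef10_filter_insert()), then sweep whatever is still marked:
 *
 *	for_each_stack_filter(i)		// mark
 *		entry[i].spec |= STACK_OLD;
 *	insert_or_renew_wanted_filters();	// renewal clears STACK_OLD
 *	for_each_row(i)				// sweep
 *		if (entry[i].spec & STACK_OLD)
 *			remove(i);
 *
 * for_each_stack_filter() and insert_or_renew_wanted_filters() are
 * hypothetical names standing in for the loops above.
 */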
2836 
2837 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
2838 {
2839 	efx_ef10_filter_sync_rx_mode(efx);
2840 
2841 	return efx_mcdi_set_mac(efx);
2842 }
2843 
2844 #ifdef CONFIG_SFC_MTD
2845 
2846 struct efx_ef10_nvram_type_info {
2847 	u16 type, type_mask;
2848 	u8 port;
2849 	const char *name;
2850 };
2851 
2852 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
2853 	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   0,    0, "sfc_mcfw" },
2854 	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,    0, "sfc_mcfw_backup" },
2855 	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   0,    0, "sfc_exp_rom" },
2856 	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG,	   0,    0, "sfc_static_cfg" },
2857 	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   0,    0, "sfc_dynamic_cfg" },
2858 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,   0, "sfc_exp_rom_cfg" },
2859 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
2860 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
2861 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
2862 	{ NVRAM_PARTITION_TYPE_PHY_MIN,		   0xff, 0, "sfc_phy_fw" },
2863 };
2864 
2865 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
2866 					struct efx_mcdi_mtd_partition *part,
2867 					unsigned int type)
2868 {
2869 	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
2870 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
2871 	const struct efx_ef10_nvram_type_info *info;
2872 	size_t size, erase_size, outlen;
2873 	bool protected;
2874 	int rc;
2875 
2876 	for (info = efx_ef10_nvram_types; ; info++) {
2877 		if (info ==
2878 		    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
2879 			return -ENODEV;
2880 		if ((type & ~info->type_mask) == info->type)
2881 			break;
2882 	}
2883 	if (info->port != efx_port_num(efx))
2884 		return -ENODEV;
2885 
2886 	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
2887 	if (rc)
2888 		return rc;
2889 	if (protected)
2890 		return -ENODEV; /* hide it */
2891 
2892 	part->nvram_type = type;
2893 
2894 	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
2895 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
2896 			  outbuf, sizeof(outbuf), &outlen);
2897 	if (rc)
2898 		return rc;
2899 	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
2900 		return -EIO;
2901 	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
2902 	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
2903 		part->fw_subtype = MCDI_DWORD(outbuf,
2904 					      NVRAM_METADATA_OUT_SUBTYPE);
2905 
2906 	part->common.dev_type_name = "EF10 NVRAM manager";
2907 	part->common.type_name = info->name;
2908 
2909 	part->common.mtd.type = MTD_NORFLASH;
2910 	part->common.mtd.flags = MTD_CAP_NORFLASH;
2911 	part->common.mtd.size = size;
2912 	part->common.mtd.erasesize = erase_size;
2913 
2914 	return 0;
2915 }
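
/* Illustration (not part of the driver): type_mask in
 * efx_ef10_nvram_types marks "don't care" bits, so a table entry
 * matches when the candidate type equals info->type once those bits
 * are stripped:
 *
 *	match = (type & ~info->type_mask) == info->type;
 *
 * e.g. the PHY_MIN entry, with type_mask 0xff, matches the whole range
 * of per-PHY partition types that differ only in their low byte.
 */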
2916 
2917 static int efx_ef10_mtd_probe(struct efx_nic *efx)
2918 {
2919 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
2920 	struct efx_mcdi_mtd_partition *parts;
2921 	size_t outlen, n_parts_total, i, n_parts;
2922 	unsigned int type;
2923 	int rc;
2924 
2925 	ASSERT_RTNL();
2926 
2927 	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
2928 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
2929 			  outbuf, sizeof(outbuf), &outlen);
2930 	if (rc)
2931 		return rc;
2932 	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
2933 		return -EIO;
2934 
2935 	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
2936 	if (n_parts_total >
2937 	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
2938 		return -EIO;
2939 
2940 	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
2941 	if (!parts)
2942 		return -ENOMEM;
2943 
2944 	n_parts = 0;
2945 	for (i = 0; i < n_parts_total; i++) {
2946 		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
2947 					i);
2948 		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
2949 		if (rc == 0)
2950 			n_parts++;
2951 		else if (rc != -ENODEV)
2952 			goto fail;
2953 	}
2954 
2955 	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
2956 fail:
2957 	if (rc)
2958 		kfree(parts);
2959 	return rc;
2960 }
2961 
2962 #endif /* CONFIG_SFC_MTD */
2963 
2964 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
2965 {
2966 	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
2967 }
2968 
2969 const struct efx_nic_type efx_hunt_a0_nic_type = {
2970 	.mem_map_size = efx_ef10_mem_map_size,
2971 	.probe = efx_ef10_probe,
2972 	.remove = efx_ef10_remove,
2973 	.dimension_resources = efx_ef10_dimension_resources,
2974 	.init = efx_ef10_init_nic,
2975 	.fini = efx_port_dummy_op_void,
2976 	.map_reset_reason = efx_mcdi_map_reset_reason,
2977 	.map_reset_flags = efx_ef10_map_reset_flags,
2978 	.reset = efx_mcdi_reset,
2979 	.probe_port = efx_mcdi_port_probe,
2980 	.remove_port = efx_mcdi_port_remove,
2981 	.fini_dmaq = efx_ef10_fini_dmaq,
2982 	.describe_stats = efx_ef10_describe_stats,
2983 	.update_stats = efx_ef10_update_stats,
2984 	.start_stats = efx_mcdi_mac_start_stats,
2985 	.stop_stats = efx_mcdi_mac_stop_stats,
2986 	.set_id_led = efx_mcdi_set_id_led,
2987 	.push_irq_moderation = efx_ef10_push_irq_moderation,
2988 	.reconfigure_mac = efx_ef10_mac_reconfigure,
2989 	.check_mac_fault = efx_mcdi_mac_check_fault,
2990 	.reconfigure_port = efx_mcdi_port_reconfigure,
2991 	.get_wol = efx_ef10_get_wol,
2992 	.set_wol = efx_ef10_set_wol,
2993 	.resume_wol = efx_port_dummy_op_void,
2994 	/* TODO: test_chip */
2995 	.test_nvram = efx_mcdi_nvram_test_all,
2996 	.mcdi_request = efx_ef10_mcdi_request,
2997 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
2998 	.mcdi_read_response = efx_ef10_mcdi_read_response,
2999 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
3000 	.irq_enable_master = efx_port_dummy_op_void,
3001 	.irq_test_generate = efx_ef10_irq_test_generate,
3002 	.irq_disable_non_ev = efx_port_dummy_op_void,
3003 	.irq_handle_msi = efx_ef10_msi_interrupt,
3004 	.irq_handle_legacy = efx_ef10_legacy_interrupt,
3005 	.tx_probe = efx_ef10_tx_probe,
3006 	.tx_init = efx_ef10_tx_init,
3007 	.tx_remove = efx_ef10_tx_remove,
3008 	.tx_write = efx_ef10_tx_write,
3009 	.rx_push_indir_table = efx_ef10_rx_push_indir_table,
3010 	.rx_probe = efx_ef10_rx_probe,
3011 	.rx_init = efx_ef10_rx_init,
3012 	.rx_remove = efx_ef10_rx_remove,
3013 	.rx_write = efx_ef10_rx_write,
3014 	.rx_defer_refill = efx_ef10_rx_defer_refill,
3015 	.ev_probe = efx_ef10_ev_probe,
3016 	.ev_init = efx_ef10_ev_init,
3017 	.ev_fini = efx_ef10_ev_fini,
3018 	.ev_remove = efx_ef10_ev_remove,
3019 	.ev_process = efx_ef10_ev_process,
3020 	.ev_read_ack = efx_ef10_ev_read_ack,
3021 	.ev_test_generate = efx_ef10_ev_test_generate,
3022 	.filter_table_probe = efx_ef10_filter_table_probe,
3023 	.filter_table_restore = efx_ef10_filter_table_restore,
3024 	.filter_table_remove = efx_ef10_filter_table_remove,
3025 	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
3026 	.filter_insert = efx_ef10_filter_insert,
3027 	.filter_remove_safe = efx_ef10_filter_remove_safe,
3028 	.filter_get_safe = efx_ef10_filter_get_safe,
3029 	.filter_clear_rx = efx_ef10_filter_clear_rx,
3030 	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
3031 	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
3032 	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
3033 #ifdef CONFIG_RFS_ACCEL
3034 	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
3035 	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
3036 #endif
3037 #ifdef CONFIG_SFC_MTD
3038 	.mtd_probe = efx_ef10_mtd_probe,
3039 	.mtd_rename = efx_mcdi_mtd_rename,
3040 	.mtd_read = efx_mcdi_mtd_read,
3041 	.mtd_erase = efx_mcdi_mtd_erase,
3042 	.mtd_write = efx_mcdi_mtd_write,
3043 	.mtd_sync = efx_mcdi_mtd_sync,
3044 #endif
3045 	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
3046 
3047 	.revision = EFX_REV_HUNT_A0,
3048 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
3049 	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
3050 	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
3051 	.can_rx_scatter = true,
3052 	.always_rx_scatter = true,
3053 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
3054 	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
3055 	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3056 			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
3057 	.mcdi_max_ver = 2,
3058 	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
3059 };
3060