xref: /openbmc/linux/drivers/net/ethernet/sfc/ef10.c (revision f7777dcc)
1 /****************************************************************************
2  * Driver for Solarflare network controllers and boards
3  * Copyright 2012-2013 Solarflare Communications Inc.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 as published
7  * by the Free Software Foundation, incorporated herein by reference.
8  */
9 
10 #include "net_driver.h"
11 #include "ef10_regs.h"
12 #include "io.h"
13 #include "mcdi.h"
14 #include "mcdi_pcol.h"
15 #include "nic.h"
16 #include "workarounds.h"
17 #include <linux/in.h>
18 #include <linux/jhash.h>
19 #include <linux/wait.h>
20 #include <linux/workqueue.h>
21 
22 /* Hardware control for EF10 architecture including 'Huntington'. */
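/* Most of this file drives the NIC by issuing MCDI RPCs to the management
 * controller.  The recurring pattern, sketched here with a stand-in command
 * name FOO (hypothetical, for illustration only):
 *
 *	MCDI_DECLARE_BUF(inbuf, MC_CMD_FOO_IN_LEN);
 *	MCDI_DECLARE_BUF(outbuf, MC_CMD_FOO_OUT_LEN);
 *	size_t outlen;
 *	int rc;
 *
 *	MCDI_SET_DWORD(inbuf, FOO_IN_SOME_ARG, value);
 *	rc = efx_mcdi_rpc(efx, MC_CMD_FOO, inbuf, sizeof(inbuf),
 *			  outbuf, sizeof(outbuf), &outlen);
 *	if (rc == 0 && outlen >= MC_CMD_FOO_OUT_LEN)
 *		result = MCDI_DWORD(outbuf, FOO_OUT_SOME_FIELD);
 */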
23 
24 #define EFX_EF10_DRVGEN_EV		7
25 enum {
26 	EFX_EF10_TEST = 1,
27 	EFX_EF10_REFILL,
28 };
29 
30 /* The reserved RSS context value */
31 #define EFX_EF10_RSS_CONTEXT_INVALID	0xffffffff
32 
33 /* The filter table(s) are managed by firmware and we have write-only
34  * access.  When removing filters we must identify them to the
35  * firmware by a 64-bit handle, but this is too wide for Linux kernel
36  * interfaces (32-bit for RX NFC, 16-bit for RFS).  Also, we need to
37  * be able to tell in advance whether a requested insertion will
38  * replace an existing filter.  Therefore we maintain a software hash
39  * table, which should be at least as large as the hardware hash
40  * table.
41  *
42  * Huntington has a single 8K filter table shared between all filter
43  * types and both ports.
44  */
45 #define HUNT_FILTER_TBL_ROWS 8192
46 
47 struct efx_ef10_filter_table {
48 /* The RX match field masks supported by this fw & hw, in order of priority */
49 	enum efx_filter_match_flags rx_match_flags[
50 		MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
51 	unsigned int rx_match_count;
52 
53 	struct {
54 		unsigned long spec;	/* pointer to spec plus flag bits */
55 /* BUSY flag indicates that an update is in progress.  STACK_OLD is
56  * used to mark and sweep stack-owned MAC filters.
57  */
58 #define EFX_EF10_FILTER_FLAG_BUSY	1UL
59 #define EFX_EF10_FILTER_FLAG_STACK_OLD	2UL
60 #define EFX_EF10_FILTER_FLAGS		3UL
61 		u64 handle;		/* firmware handle */
62 	} *entry;
63 	wait_queue_head_t waitq;
64 /* Shadow of net_device address lists, guarded by mac_lock */
65 #define EFX_EF10_FILTER_STACK_UC_MAX	32
66 #define EFX_EF10_FILTER_STACK_MC_MAX	256
67 	struct {
68 		u8 addr[ETH_ALEN];
69 		u16 id;
70 	} stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
71 	  stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
72 	int stack_uc_count;		/* negative for PROMISC */
73 	int stack_mc_count;		/* negative for PROMISC/ALLMULTI */
74 };
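/* A sketch of how the 'spec' word above is used: the low bits hold the
 * BUSY/STACK_OLD flags and the remaining bits are a pointer to the filter
 * spec.  These helpers are illustrative only (hypothetical names, not part
 * of this file):
 *
 *	static struct efx_filter_spec *entry_spec(unsigned long spec)
 *	{
 *		return (struct efx_filter_spec *)
 *			(spec & ~EFX_EF10_FILTER_FLAGS);
 *	}
 *
 *	static unsigned long entry_flags(unsigned long spec)
 *	{
 *		return spec & EFX_EF10_FILTER_FLAGS;
 *	}
 */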
75 
76 /* An arbitrary search limit for the software hash table */
77 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
78 
79 static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
80 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
81 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
82 
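/* The MC signals that it has finished booting by writing the magic value
 * 0xb007 (presumably chosen to read as "boot") to the upper word of the BIU
 * soft status register; the lower word then holds the warm boot count,
 * which changes whenever the MC reboots.  Any other value means the MC is
 * not (yet) usable.
 */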
83 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
84 {
85 	efx_dword_t reg;
86 
87 	efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
88 	return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
89 		EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
90 }
91 
92 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
93 {
94 	return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
95 }
96 
97 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
98 {
99 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
100 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
101 	size_t outlen;
102 	int rc;
103 
104 	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
105 
106 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
107 			  outbuf, sizeof(outbuf), &outlen);
108 	if (rc)
109 		return rc;
110 	if (outlen < sizeof(outbuf)) {
111 		netif_err(efx, drv, efx->net_dev,
112 			  "unable to read datapath firmware capabilities\n");
113 		return -EIO;
114 	}
115 
116 	nic_data->datapath_caps =
117 		MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
118 
119 	if (!(nic_data->datapath_caps &
120 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
121 		netif_err(efx, drv, efx->net_dev,
122 			  "current firmware does not support TSO\n");
123 		return -ENODEV;
124 	}
125 
126 	if (!(nic_data->datapath_caps &
127 	      (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
128 		netif_err(efx, probe, efx->net_dev,
129 			  "current firmware does not support an RX prefix\n");
130 		return -ENODEV;
131 	}
132 
133 	return 0;
134 }
135 
136 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
137 {
138 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
139 	int rc;
140 
141 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
142 			  outbuf, sizeof(outbuf), NULL);
143 	if (rc)
144 		return rc;
145 	rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
146 	return rc > 0 ? rc : -ERANGE;
147 }
148 
149 static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
150 {
151 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
152 	size_t outlen;
153 	int rc;
154 
155 	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
156 
157 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
158 			  outbuf, sizeof(outbuf), &outlen);
159 	if (rc)
160 		return rc;
161 	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
162 		return -EIO;
163 
164 	memcpy(mac_address,
165 	       MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
166 	return 0;
167 }
168 
169 static int efx_ef10_probe(struct efx_nic *efx)
170 {
171 	struct efx_ef10_nic_data *nic_data;
172 	int i, rc;
173 
174 	/* We can have one VI for each 8K region.  However we need
175 	 * multiple TX queues per channel.
176 	 */
177 	efx->max_channels =
178 		min_t(unsigned int,
179 		      EFX_MAX_CHANNELS,
180 		      resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
181 		      (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
182 	BUG_ON(efx->max_channels == 0);
183 
184 	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
185 	if (!nic_data)
186 		return -ENOMEM;
187 	efx->nic_data = nic_data;
188 
189 	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
190 				  8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
191 	if (rc)
192 		goto fail1;
193 
194 	/* Get the MC's warm boot count.  In case it's rebooting right
195 	 * now, be prepared to retry.
196 	 */
197 	i = 0;
198 	for (;;) {
199 		rc = efx_ef10_get_warm_boot_count(efx);
200 		if (rc >= 0)
201 			break;
202 		if (++i == 5)
203 			goto fail2;
204 		ssleep(1);
205 	}
206 	nic_data->warm_boot_count = rc;
207 
208 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
209 
210 	/* In case we're recovering from a crash (kexec), we want to
211 	 * cancel any outstanding request by the previous user of this
212 	 * function.  We send a special message using the least
213 	 * significant bits of the 'high' (doorbell) register.
214 	 */
215 	_efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
216 
217 	rc = efx_mcdi_init(efx);
218 	if (rc)
219 		goto fail2;
220 
221 	/* Reset (most) configuration for this function */
222 	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
223 	if (rc)
224 		goto fail3;
225 
226 	/* Enable event logging */
227 	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
228 	if (rc)
229 		goto fail3;
230 
231 	rc = efx_ef10_init_datapath_caps(efx);
232 	if (rc < 0)
233 		goto fail3;
234 
235 	efx->rx_packet_len_offset =
236 		ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
237 
238 	rc = efx_mcdi_port_get_number(efx);
239 	if (rc < 0)
240 		goto fail3;
241 	efx->port_num = rc;
242 
243 	rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
244 	if (rc)
245 		goto fail3;
246 
247 	rc = efx_ef10_get_sysclk_freq(efx);
248 	if (rc < 0)
249 		goto fail3;
250 	efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
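	/* For example, assuming SYS_FREQ is reported in MHz, a 200 MHz
	 * system clock gives 1536000 / 200 = 7680 ns per timer tick,
	 * i.e. 1536 clock cycles.
	 */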
251 
252 	/* Check whether firmware supports bug 35388 workaround */
253 	rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
254 	if (rc == 0)
255 		nic_data->workaround_35388 = true;
256 	else if (rc != -ENOSYS && rc != -ENOENT)
257 		goto fail3;
258 	netif_dbg(efx, probe, efx->net_dev,
259 		  "workaround for bug 35388 is %sabled\n",
260 		  nic_data->workaround_35388 ? "en" : "dis");
261 
262 	rc = efx_mcdi_mon_probe(efx);
263 	if (rc)
264 		goto fail3;
265 
266 	return 0;
267 
268 fail3:
269 	efx_mcdi_fini(efx);
270 fail2:
271 	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
272 fail1:
273 	kfree(nic_data);
274 	efx->nic_data = NULL;
275 	return rc;
276 }
277 
278 static int efx_ef10_free_vis(struct efx_nic *efx)
279 {
280 	int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
281 
282 	/* -EALREADY means nothing to free, so ignore */
283 	if (rc == -EALREADY)
284 		rc = 0;
285 	return rc;
286 }
287 
288 static void efx_ef10_remove(struct efx_nic *efx)
289 {
290 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
291 	int rc;
292 
293 	efx_mcdi_mon_remove(efx);
294 
295 	/* This must be called after efx_ptp_remove_channel(), with no filters installed */
296 	efx_ef10_rx_free_indir_table(efx);
297 
298 	rc = efx_ef10_free_vis(efx);
299 	WARN_ON(rc != 0);
300 
301 	efx_mcdi_fini(efx);
302 	efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
303 	kfree(nic_data);
304 }
305 
306 static int efx_ef10_alloc_vis(struct efx_nic *efx,
307 			      unsigned int min_vis, unsigned int max_vis)
308 {
309 	MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
310 	MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
311 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
312 	size_t outlen;
313 	int rc;
314 
315 	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
316 	MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
317 	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
318 			  outbuf, sizeof(outbuf), &outlen);
319 	if (rc != 0)
320 		return rc;
321 
322 	if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
323 		return -EIO;
324 
325 	netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
326 		  MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
327 
328 	nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
329 	nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
330 	return 0;
331 }
332 
333 static int efx_ef10_dimension_resources(struct efx_nic *efx)
334 {
335 	unsigned int n_vis =
336 		max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
337 
338 	return efx_ef10_alloc_vis(efx, n_vis, n_vis);
339 }
340 
341 static int efx_ef10_init_nic(struct efx_nic *efx)
342 {
343 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
344 	int rc;
345 
346 	if (nic_data->must_check_datapath_caps) {
347 		rc = efx_ef10_init_datapath_caps(efx);
348 		if (rc)
349 			return rc;
350 		nic_data->must_check_datapath_caps = false;
351 	}
352 
353 	if (nic_data->must_realloc_vis) {
354 		/* We cannot let the number of VIs change now */
355 		rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
356 					nic_data->n_allocated_vis);
357 		if (rc)
358 			return rc;
359 		nic_data->must_realloc_vis = false;
360 	}
361 
362 	efx_ef10_rx_push_indir_table(efx);
363 	return 0;
364 }
365 
366 static int efx_ef10_map_reset_flags(u32 *flags)
367 {
368 	enum {
369 		EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
370 				   ETH_RESET_SHARED_SHIFT),
371 		EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
372 				  ETH_RESET_OFFLOAD | ETH_RESET_MAC |
373 				  ETH_RESET_PHY | ETH_RESET_MGMT) <<
374 				 ETH_RESET_SHARED_SHIFT)
375 	};
376 
377 	/* We assume for now that our PCI function is permitted to
378 	 * reset everything.
379 	 */
380 
381 	if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
382 		*flags &= ~EF10_RESET_MC;
383 		return RESET_TYPE_WORLD;
384 	}
385 
386 	if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
387 		*flags &= ~EF10_RESET_PORT;
388 		return RESET_TYPE_ALL;
389 	}
390 
391 	/* no invisible reset implemented */
392 
393 	return -EINVAL;
394 }
395 
396 #define EF10_DMA_STAT(ext_name, mcdi_name)			\
397 	[EF10_STAT_ ## ext_name] =				\
398 	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
399 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name)		\
400 	[EF10_STAT_ ## int_name] =				\
401 	{ NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
402 #define EF10_OTHER_STAT(ext_name)				\
403 	[EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
404 
405 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
406 	EF10_DMA_STAT(tx_bytes, TX_BYTES),
407 	EF10_DMA_STAT(tx_packets, TX_PKTS),
408 	EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
409 	EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
410 	EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
411 	EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
412 	EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
413 	EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
414 	EF10_DMA_STAT(tx_64, TX_64_PKTS),
415 	EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
416 	EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
417 	EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
418 	EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
419 	EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
420 	EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
421 	EF10_DMA_STAT(rx_bytes, RX_BYTES),
422 	EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
423 	EF10_OTHER_STAT(rx_good_bytes),
424 	EF10_OTHER_STAT(rx_bad_bytes),
425 	EF10_DMA_STAT(rx_packets, RX_PKTS),
426 	EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
427 	EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
428 	EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
429 	EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
430 	EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
431 	EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
432 	EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
433 	EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
434 	EF10_DMA_STAT(rx_64, RX_64_PKTS),
435 	EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
436 	EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
437 	EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
438 	EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
439 	EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
440 	EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
441 	EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
442 	EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
443 	EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
444 	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
445 	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
446 	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
447 	EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
448 	EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
449 	EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
450 	EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
451 	EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
452 	EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
453 	EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
454 	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
455 	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
456 	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
457 	EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
458 	EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
459 };
460 
461 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
462 			       (1ULL << EF10_STAT_tx_packets) |		\
463 			       (1ULL << EF10_STAT_tx_pause) |		\
464 			       (1ULL << EF10_STAT_tx_unicast) |		\
465 			       (1ULL << EF10_STAT_tx_multicast) |	\
466 			       (1ULL << EF10_STAT_tx_broadcast) |	\
467 			       (1ULL << EF10_STAT_rx_bytes) |		\
468 			       (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
469 			       (1ULL << EF10_STAT_rx_good_bytes) |	\
470 			       (1ULL << EF10_STAT_rx_bad_bytes) |	\
471 			       (1ULL << EF10_STAT_rx_packets) |		\
472 			       (1ULL << EF10_STAT_rx_good) |		\
473 			       (1ULL << EF10_STAT_rx_bad) |		\
474 			       (1ULL << EF10_STAT_rx_pause) |		\
475 			       (1ULL << EF10_STAT_rx_control) |		\
476 			       (1ULL << EF10_STAT_rx_unicast) |		\
477 			       (1ULL << EF10_STAT_rx_multicast) |	\
478 			       (1ULL << EF10_STAT_rx_broadcast) |	\
479 			       (1ULL << EF10_STAT_rx_lt64) |		\
480 			       (1ULL << EF10_STAT_rx_64) |		\
481 			       (1ULL << EF10_STAT_rx_65_to_127) |	\
482 			       (1ULL << EF10_STAT_rx_128_to_255) |	\
483 			       (1ULL << EF10_STAT_rx_256_to_511) |	\
484 			       (1ULL << EF10_STAT_rx_512_to_1023) |	\
485 			       (1ULL << EF10_STAT_rx_1024_to_15xx) |	\
486 			       (1ULL << EF10_STAT_rx_15xx_to_jumbo) |	\
487 			       (1ULL << EF10_STAT_rx_gtjumbo) |		\
488 			       (1ULL << EF10_STAT_rx_bad_gtjumbo) |	\
489 			       (1ULL << EF10_STAT_rx_overflow) |	\
490 			       (1ULL << EF10_STAT_rx_nodesc_drops))
491 
492 /* These statistics are only provided by the 10G MAC.  For a 10G/40G
493  * switchable port we do not expose these because they might not
494  * include all the packets they should.
495  */
496 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) |	\
497 				 (1ULL << EF10_STAT_tx_lt64) |		\
498 				 (1ULL << EF10_STAT_tx_64) |		\
499 				 (1ULL << EF10_STAT_tx_65_to_127) |	\
500 				 (1ULL << EF10_STAT_tx_128_to_255) |	\
501 				 (1ULL << EF10_STAT_tx_256_to_511) |	\
502 				 (1ULL << EF10_STAT_tx_512_to_1023) |	\
503 				 (1ULL << EF10_STAT_tx_1024_to_15xx) |	\
504 				 (1ULL << EF10_STAT_tx_15xx_to_jumbo))
505 
506 /* These statistics are only provided by the 40G MAC.  For a 10G/40G
507  * switchable port we do expose these because the errors will otherwise
508  * be silent.
509  */
510 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
511 				  (1ULL << EF10_STAT_rx_length_error))
512 
513 /* These statistics are only provided if the firmware supports the
514  * capability PM_AND_RXDP_COUNTERS.
515  */
516 #define HUNT_PM_AND_RXDP_STAT_MASK (					\
517 	(1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |			\
518 	(1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |			\
519 	(1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |			\
520 	(1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |			\
521 	(1ULL << EF10_STAT_rx_pm_trunc_qbb) |				\
522 	(1ULL << EF10_STAT_rx_pm_discard_qbb) |				\
523 	(1ULL << EF10_STAT_rx_pm_discard_mapping) |			\
524 	(1ULL << EF10_STAT_rx_dp_q_disabled_packets) |			\
525 	(1ULL << EF10_STAT_rx_dp_di_dropped_packets) |			\
526 	(1ULL << EF10_STAT_rx_dp_streaming_packets) |			\
527 	(1ULL << EF10_STAT_rx_dp_emerg_fetch) |				\
528 	(1ULL << EF10_STAT_rx_dp_emerg_wait))
529 
530 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
531 {
532 	u64 raw_mask = HUNT_COMMON_STAT_MASK;
533 	u32 port_caps = efx_mcdi_phy_get_caps(efx);
534 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
535 
536 	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
537 		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
538 	else
539 		raw_mask |= HUNT_10G_ONLY_STAT_MASK;
540 
541 	if (nic_data->datapath_caps &
542 	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
543 		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
544 
545 	return raw_mask;
546 }
547 
548 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
549 {
550 	u64 raw_mask = efx_ef10_raw_stat_mask(efx);
551 
552 #if BITS_PER_LONG == 64
553 	mask[0] = raw_mask;
554 #else
555 	mask[0] = raw_mask & 0xffffffff;
556 	mask[1] = raw_mask >> 32;
557 #endif
558 }
559 
560 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
561 {
562 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
563 
564 	efx_ef10_get_stat_mask(efx, mask);
565 	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
566 				      mask, names);
567 }
568 
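/* The MC DMAs MAC statistics into stats_buffer together with generation
 * markers.  To obtain a consistent snapshot we read GENERATION_END, copy
 * the statistics, then read GENERATION_START; if the two differ the MC
 * updated the buffer while we were reading it and the caller must retry.
 */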
569 static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
570 {
571 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
572 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
573 	__le64 generation_start, generation_end;
574 	u64 *stats = nic_data->stats;
575 	__le64 *dma_stats;
576 
577 	efx_ef10_get_stat_mask(efx, mask);
578 
579 	dma_stats = efx->stats_buffer.addr;
580 	nic_data = efx->nic_data;
581 
582 	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
583 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
584 		return 0;
585 	rmb();
586 	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
587 			     stats, efx->stats_buffer.addr, false);
588 	rmb();
589 	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
590 	if (generation_end != generation_start)
591 		return -EAGAIN;
592 
593 	/* Update derived statistics */
594 	stats[EF10_STAT_rx_good_bytes] =
595 		stats[EF10_STAT_rx_bytes] -
596 		stats[EF10_STAT_rx_bytes_minus_good_bytes];
597 	efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
598 			     stats[EF10_STAT_rx_bytes_minus_good_bytes]);
599 
600 	return 0;
601 }
602 
603 
604 static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
605 				    struct rtnl_link_stats64 *core_stats)
606 {
607 	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
608 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
609 	u64 *stats = nic_data->stats;
610 	size_t stats_count = 0, index;
611 	int retry;
612 
613 	efx_ef10_get_stat_mask(efx, mask);
614 
615 	/* If we're unlucky enough to read statistics during the DMA, wait
616 	 * up to 10ms for it to finish (typically takes <500us)
617 	 */
618 	for (retry = 0; retry < 100; ++retry) {
619 		if (efx_ef10_try_update_nic_stats(efx) == 0)
620 			break;
621 		udelay(100);
622 	}
623 
624 	if (full_stats) {
625 		for_each_set_bit(index, mask, EF10_STAT_COUNT) {
626 			if (efx_ef10_stat_desc[index].name) {
627 				*full_stats++ = stats[index];
628 				++stats_count;
629 			}
630 		}
631 	}
632 
633 	if (core_stats) {
634 		core_stats->rx_packets = stats[EF10_STAT_rx_packets];
635 		core_stats->tx_packets = stats[EF10_STAT_tx_packets];
636 		core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
637 		core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
638 		core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
639 		core_stats->multicast = stats[EF10_STAT_rx_multicast];
640 		core_stats->rx_length_errors =
641 			stats[EF10_STAT_rx_gtjumbo] +
642 			stats[EF10_STAT_rx_length_error];
643 		core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
644 		core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
645 		core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
646 		core_stats->rx_errors = (core_stats->rx_length_errors +
647 					 core_stats->rx_crc_errors +
648 					 core_stats->rx_frame_errors);
649 	}
650 
651 	return stats_count;
652 }
653 
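/* Program the event queue timer for interrupt moderation.  Mode 3 is
 * assumed here to be the timer's interrupt hold-off mode, with the value
 * given as the moderation interval in timer quanta minus one; mode 0
 * disables moderation entirely.
 */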
654 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
655 {
656 	struct efx_nic *efx = channel->efx;
657 	unsigned int mode, value;
658 	efx_dword_t timer_cmd;
659 
660 	if (channel->irq_moderation) {
661 		mode = 3;
662 		value = channel->irq_moderation - 1;
663 	} else {
664 		mode = 0;
665 		value = 0;
666 	}
667 
668 	if (EFX_EF10_WORKAROUND_35388(efx)) {
669 		EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
670 				     EFE_DD_EVQ_IND_TIMER_FLAGS,
671 				     ERF_DD_EVQ_IND_TIMER_MODE, mode,
672 				     ERF_DD_EVQ_IND_TIMER_VAL, value);
673 		efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
674 				channel->channel);
675 	} else {
676 		EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
677 				     ERF_DZ_TC_TIMER_VAL, value);
678 		efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
679 				channel->channel);
680 	}
681 }
682 
683 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
684 {
685 	wol->supported = 0;
686 	wol->wolopts = 0;
687 	memset(&wol->sopass, 0, sizeof(wol->sopass));
688 }
689 
690 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
691 {
692 	if (type != 0)
693 		return -EINVAL;
694 	return 0;
695 }
696 
697 static void efx_ef10_mcdi_request(struct efx_nic *efx,
698 				  const efx_dword_t *hdr, size_t hdr_len,
699 				  const efx_dword_t *sdu, size_t sdu_len)
700 {
701 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
702 	u8 *pdu = nic_data->mcdi_buf.addr;
703 
704 	memcpy(pdu, hdr, hdr_len);
705 	memcpy(pdu + hdr_len, sdu, sdu_len);
706 	wmb();
707 
708 	/* The hardware provides 'low' and 'high' (doorbell) registers
709 	 * for passing the 64-bit address of an MCDI request to
710 	 * firmware.  However the dwords are swapped by firmware.  The
711 	 * least significant bits of the doorbell are then 0 for all
712 	 * MCDI requests due to alignment.
713 	 */
714 	_efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
715 		    ER_DZ_MC_DB_LWRD);
716 	_efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
717 		    ER_DZ_MC_DB_HWRD);
718 }
719 
720 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
721 {
722 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
723 	const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
724 
725 	rmb();
726 	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
727 }
728 
729 static void
730 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
731 			    size_t offset, size_t outlen)
732 {
733 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
734 	const u8 *pdu = nic_data->mcdi_buf.addr;
735 
736 	memcpy(outbuf, pdu + offset, outlen);
737 }
738 
739 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
740 {
741 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
742 	int rc;
743 
744 	rc = efx_ef10_get_warm_boot_count(efx);
745 	if (rc < 0) {
746 		/* The firmware is presumably in the process of
747 		 * rebooting.  However, we are supposed to report each
748 		 * reboot just once, so we must only do that once we
749 		 * can read and store the updated warm boot count.
750 		 */
751 		return 0;
752 	}
753 
754 	if (rc == nic_data->warm_boot_count)
755 		return 0;
756 
757 	nic_data->warm_boot_count = rc;
758 
759 	/* All our allocations have been reset */
760 	nic_data->must_realloc_vis = true;
761 	nic_data->must_restore_filters = true;
762 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
763 
764 	/* The datapath firmware might have been changed */
765 	nic_data->must_check_datapath_caps = true;
766 
767 	/* MAC statistics have been cleared on the NIC; clear the local
768 	 * statistic that we update with efx_update_diff_stat().
769 	 */
770 	nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
771 
772 	return -EIO;
773 }
774 
775 /* Handle an MSI interrupt
776  *
777  * Handle an MSI hardware interrupt.  This routine schedules event
778  * queue processing.  No interrupt acknowledgement cycle is necessary.
779  * Also, we never need to check that the interrupt is for us, since
780  * MSI interrupts cannot be shared.
781  */
782 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
783 {
784 	struct efx_msi_context *context = dev_id;
785 	struct efx_nic *efx = context->efx;
786 
787 	netif_vdbg(efx, intr, efx->net_dev,
788 		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
789 
790 	if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
791 		/* Note test interrupts */
792 		if (context->index == efx->irq_level)
793 			efx->last_irq_cpu = raw_smp_processor_id();
794 
795 		/* Schedule processing of the channel */
796 		efx_schedule_channel_irq(efx->channel[context->index]);
797 	}
798 
799 	return IRQ_HANDLED;
800 }
801 
802 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
803 {
804 	struct efx_nic *efx = dev_id;
805 	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
806 	struct efx_channel *channel;
807 	efx_dword_t reg;
808 	u32 queues;
809 
810 	/* Read the ISR which also ACKs the interrupts */
811 	efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
812 	queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
813 
814 	if (queues == 0)
815 		return IRQ_NONE;
816 
817 	if (likely(soft_enabled)) {
818 		/* Note test interrupts */
819 		if (queues & (1U << efx->irq_level))
820 			efx->last_irq_cpu = raw_smp_processor_id();
821 
822 		efx_for_each_channel(channel, efx) {
823 			if (queues & 1)
824 				efx_schedule_channel_irq(channel);
825 			queues >>= 1;
826 		}
827 	}
828 
829 	netif_vdbg(efx, intr, efx->net_dev,
830 		   "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
831 		   irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
832 
833 	return IRQ_HANDLED;
834 }
835 
836 static void efx_ef10_irq_test_generate(struct efx_nic *efx)
837 {
838 	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
839 
840 	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
841 
842 	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
843 	(void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
844 			    inbuf, sizeof(inbuf), NULL, 0, NULL);
845 }
846 
847 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
848 {
849 	return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
850 				    (tx_queue->ptr_mask + 1) *
851 				    sizeof(efx_qword_t),
852 				    GFP_KERNEL);
853 }
854 
855 /* This writes to the TX_DESC_WPTR and also pushes data */
856 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
857 					 const efx_qword_t *txd)
858 {
859 	unsigned int write_ptr;
860 	efx_oword_t reg;
861 
862 	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
863 	EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
864 	reg.qword[0] = *txd;
865 	efx_writeo_page(tx_queue->efx, &reg,
866 			ER_DZ_TX_DESC_UPD, tx_queue->queue);
867 }
868 
869 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
870 {
871 	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
872 						       EFX_BUF_SIZE));
873 	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
874 	bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
875 	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
876 	struct efx_channel *channel = tx_queue->channel;
877 	struct efx_nic *efx = tx_queue->efx;
878 	size_t inlen, outlen;
879 	dma_addr_t dma_addr;
880 	efx_qword_t *txd;
881 	int rc;
882 	int i;
883 
884 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
885 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
886 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
887 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
888 	MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
889 			      INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
890 			      INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
891 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
892 	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
893 
894 	dma_addr = tx_queue->txd.buf.dma_addr;
895 
896 	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
897 		  tx_queue->queue, entries, (u64)dma_addr);
898 
899 	for (i = 0; i < entries; ++i) {
900 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
901 		dma_addr += EFX_BUF_SIZE;
902 	}
903 
904 	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
905 
906 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
907 			  outbuf, sizeof(outbuf), &outlen);
908 	if (rc)
909 		goto fail;
910 
911 	/* A previous user of this TX queue might have set us up the
912 	 * bomb by writing a descriptor to the TX push collector but
913 	 * not the doorbell.  (Each collector belongs to a port, not a
914 	 * queue or function, so cannot easily be reset.)  We must
915 	 * attempt to push a no-op descriptor in its place.
916 	 */
917 	tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
918 	tx_queue->insert_count = 1;
919 	txd = efx_tx_desc(tx_queue, 0);
920 	EFX_POPULATE_QWORD_4(*txd,
921 			     ESF_DZ_TX_DESC_IS_OPT, true,
922 			     ESF_DZ_TX_OPTION_TYPE,
923 			     ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
924 			     ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
925 			     ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
926 	tx_queue->write_count = 1;
927 	wmb();
928 	efx_ef10_push_tx_desc(tx_queue, txd);
929 
930 	return;
931 
932 fail:
933 	WARN_ON(true);
934 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
935 }
936 
937 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
938 {
939 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
940 	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
941 	struct efx_nic *efx = tx_queue->efx;
942 	size_t outlen;
943 	int rc;
944 
945 	MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
946 		       tx_queue->queue);
947 
948 	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
949 			  outbuf, sizeof(outbuf), &outlen);
950 
951 	if (rc && rc != -EALREADY)
952 		goto fail;
953 
954 	return;
955 
956 fail:
957 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
958 }
959 
960 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
961 {
962 	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
963 }
964 
965 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
966 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
967 {
968 	unsigned int write_ptr;
969 	efx_dword_t reg;
970 
971 	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
972 	EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
973 	efx_writed_page(tx_queue->efx, &reg,
974 			ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
975 }
976 
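/* Write out all descriptors queued since the last call.  The first newly
 * written descriptor may be pushed directly through the doorbell write
 * (efx_ef10_push_tx_desc()) when efx_nic_may_push_tx_desc() allows it,
 * typically when the queue was idle; otherwise only the write pointer is
 * updated via efx_ef10_notify_tx_desc().
 */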
977 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
978 {
979 	unsigned int old_write_count = tx_queue->write_count;
980 	struct efx_tx_buffer *buffer;
981 	unsigned int write_ptr;
982 	efx_qword_t *txd;
983 
984 	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
985 
986 	do {
987 		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
988 		buffer = &tx_queue->buffer[write_ptr];
989 		txd = efx_tx_desc(tx_queue, write_ptr);
990 		++tx_queue->write_count;
991 
992 		/* Create TX descriptor ring entry */
993 		if (buffer->flags & EFX_TX_BUF_OPTION) {
994 			*txd = buffer->option;
995 		} else {
996 			BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
997 			EFX_POPULATE_QWORD_3(
998 				*txd,
999 				ESF_DZ_TX_KER_CONT,
1000 				buffer->flags & EFX_TX_BUF_CONT,
1001 				ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
1002 				ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
1003 		}
1004 	} while (tx_queue->write_count != tx_queue->insert_count);
1005 
1006 	wmb(); /* Ensure descriptors are written before they are fetched */
1007 
1008 	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
1009 		txd = efx_tx_desc(tx_queue,
1010 				  old_write_count & tx_queue->ptr_mask);
1011 		efx_ef10_push_tx_desc(tx_queue, txd);
1012 		++tx_queue->pushes;
1013 	} else {
1014 		efx_ef10_notify_tx_desc(tx_queue);
1015 	}
1016 }
1017 
1018 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
1019 {
1020 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
1021 	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
1022 	size_t outlen;
1023 	int rc;
1024 
1025 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
1026 		       EVB_PORT_ID_ASSIGNED);
1027 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
1028 		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
1029 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
1030 		       EFX_MAX_CHANNELS);
1031 
1032 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
1033 		outbuf, sizeof(outbuf), &outlen);
1034 	if (rc != 0)
1035 		return rc;
1036 
1037 	if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
1038 		return -EIO;
1039 
1040 	*context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
1041 
1042 	return 0;
1043 }
1044 
1045 static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
1046 {
1047 	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
1048 	int rc;
1049 
1050 	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
1051 		       context);
1052 
1053 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
1054 			    NULL, 0, NULL);
1055 	WARN_ON(rc != 0);
1056 }
1057 
1058 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
1059 {
1060 	MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1061 	MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1062 	int i, rc;
1063 
1064 	MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
1065 		       context);
1066 	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1067 		     MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
1068 
1069 	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
1070 		MCDI_PTR(tablebuf,
1071 			 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
1072 				(u8) efx->rx_indir_table[i];
1073 
1074 	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
1075 			  sizeof(tablebuf), NULL, 0, NULL);
1076 	if (rc != 0)
1077 		return rc;
1078 
1079 	MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
1080 		       context);
1081 	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
1082 		     MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
1083 	for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
1084 		MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
1085 			efx->rx_hash_key[i];
1086 
1087 	return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
1088 			    sizeof(keybuf), NULL, 0, NULL);
1089 }
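/* For illustration: the indirection table pushed above is normally filled
 * so that flows are spread evenly across the RX channels, e.g. (sketch,
 * not part of this file):
 *
 *	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
 *		efx->rx_indir_table[i] =
 *			ethtool_rxfh_indir_default(i, efx->n_rx_channels);
 */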
1090 
1091 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
1092 {
1093 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1094 
1095 	if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
1096 		efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
1097 	nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1098 }
1099 
1100 static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
1101 {
1102 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1103 	int rc;
1104 
1105 	netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
1106 
1107 	if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
1108 		rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
1109 		if (rc != 0)
1110 			goto fail;
1111 	}
1112 
1113 	rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
1114 	if (rc != 0)
1115 		goto fail;
1116 
1117 	return;
1118 
1119 fail:
1120 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1121 }
1122 
1123 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
1124 {
1125 	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
1126 				    (rx_queue->ptr_mask + 1) *
1127 				    sizeof(efx_qword_t),
1128 				    GFP_KERNEL);
1129 }
1130 
1131 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
1132 {
1133 	MCDI_DECLARE_BUF(inbuf,
1134 			 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
1135 						EFX_BUF_SIZE));
1136 	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
1137 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1138 	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
1139 	struct efx_nic *efx = rx_queue->efx;
1140 	size_t inlen, outlen;
1141 	dma_addr_t dma_addr;
1142 	int rc;
1143 	int i;
1144 
1145 	rx_queue->scatter_n = 0;
1146 	rx_queue->scatter_len = 0;
1147 
1148 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
1149 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
1150 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
1151 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
1152 		       efx_rx_queue_index(rx_queue));
1153 	MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
1154 			      INIT_RXQ_IN_FLAG_PREFIX, 1);
1155 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
1156 	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1157 
1158 	dma_addr = rx_queue->rxd.buf.dma_addr;
1159 
1160 	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
1161 		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
1162 
1163 	for (i = 0; i < entries; ++i) {
1164 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
1165 		dma_addr += EFX_BUF_SIZE;
1166 	}
1167 
1168 	inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
1169 
1170 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
1171 			  outbuf, sizeof(outbuf), &outlen);
1172 	if (rc)
1173 		goto fail;
1174 
1175 	return;
1176 
1177 fail:
1178 	WARN_ON(true);
1179 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1180 }
1181 
1182 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
1183 {
1184 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
1185 	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
1186 	struct efx_nic *efx = rx_queue->efx;
1187 	size_t outlen;
1188 	int rc;
1189 
1190 	MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
1191 		       efx_rx_queue_index(rx_queue));
1192 
1193 	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
1194 			  outbuf, sizeof(outbuf), &outlen);
1195 
1196 	if (rc && rc != -EALREADY)
1197 		goto fail;
1198 
1199 	return;
1200 
1201 fail:
1202 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1203 }
1204 
1205 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
1206 {
1207 	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
1208 }
1209 
1210 /* This creates an entry in the RX descriptor queue */
1211 static inline void
1212 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
1213 {
1214 	struct efx_rx_buffer *rx_buf;
1215 	efx_qword_t *rxd;
1216 
1217 	rxd = efx_rx_desc(rx_queue, index);
1218 	rx_buf = efx_rx_buffer(rx_queue, index);
1219 	EFX_POPULATE_QWORD_2(*rxd,
1220 			     ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
1221 			     ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
1222 }
1223 
1224 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
1225 {
1226 	struct efx_nic *efx = rx_queue->efx;
1227 	unsigned int write_count;
1228 	efx_dword_t reg;
1229 
1230 	/* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
1231 	write_count = rx_queue->added_count & ~7;
1232 	if (rx_queue->notified_count == write_count)
1233 		return;
1234 
1235 	do
1236 		efx_ef10_build_rx_desc(
1237 			rx_queue,
1238 			rx_queue->notified_count & rx_queue->ptr_mask);
1239 	while (++rx_queue->notified_count != write_count);
1240 
1241 	wmb();
1242 	EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
1243 			     write_count & rx_queue->ptr_mask);
1244 	efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
1245 			efx_rx_queue_index(rx_queue));
1246 }
1247 
1248 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
1249 
1250 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
1251 {
1252 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
1253 	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
1254 	efx_qword_t event;
1255 
1256 	EFX_POPULATE_QWORD_2(event,
1257 			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
1258 			     ESF_DZ_EV_DATA, EFX_EF10_REFILL);
1259 
1260 	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
1261 
1262 	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
1263 	 * already swapped the data to little-endian order.
1264 	 */
1265 	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
1266 	       sizeof(efx_qword_t));
1267 
1268 	efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
1269 			   inbuf, sizeof(inbuf), 0,
1270 			   efx_ef10_rx_defer_refill_complete, 0);
1271 }
1272 
1273 static void
1274 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
1275 				  int rc, efx_dword_t *outbuf,
1276 				  size_t outlen_actual)
1277 {
1278 	/* nothing to do */
1279 }
1280 
1281 static int efx_ef10_ev_probe(struct efx_channel *channel)
1282 {
1283 	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
1284 				    (channel->eventq_mask + 1) *
1285 				    sizeof(efx_qword_t),
1286 				    GFP_KERNEL);
1287 }
1288 
1289 static int efx_ef10_ev_init(struct efx_channel *channel)
1290 {
1291 	MCDI_DECLARE_BUF(inbuf,
1292 			 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
1293 						EFX_BUF_SIZE));
1294 	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
1295 	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
1296 	struct efx_nic *efx = channel->efx;
1297 	struct efx_ef10_nic_data *nic_data;
1298 	bool supports_rx_merge;
1299 	size_t inlen, outlen;
1300 	dma_addr_t dma_addr;
1301 	int rc;
1302 	int i;
1303 
1304 	nic_data = efx->nic_data;
1305 	supports_rx_merge =
1306 		!!(nic_data->datapath_caps &
1307 		   1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
1308 
1309 	/* Fill event queue with all ones (i.e. empty events) */
1310 	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
1311 
1312 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
1313 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
1314 	/* INIT_EVQ expects index in vector table, not absolute */
1315 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
1316 	MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
1317 			      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
1318 			      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
1319 			      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
1320 			      INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
1321 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
1322 		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
1323 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
1324 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
1325 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
1326 		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
1327 	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
1328 
1329 	dma_addr = channel->eventq.buf.dma_addr;
1330 	for (i = 0; i < entries; ++i) {
1331 		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
1332 		dma_addr += EFX_BUF_SIZE;
1333 	}
1334 
1335 	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
1336 
1337 	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
1338 			  outbuf, sizeof(outbuf), &outlen);
1339 	if (rc)
1340 		goto fail;
1341 
1342 	/* IRQ return is ignored */
1343 
1344 	return 0;
1345 
1346 fail:
1347 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1348 	return rc;
1349 }
1350 
1351 static void efx_ef10_ev_fini(struct efx_channel *channel)
1352 {
1353 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
1354 	MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
1355 	struct efx_nic *efx = channel->efx;
1356 	size_t outlen;
1357 	int rc;
1358 
1359 	MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
1360 
1361 	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
1362 			  outbuf, sizeof(outbuf), &outlen);
1363 
1364 	if (rc && rc != -EALREADY)
1365 		goto fail;
1366 
1367 	return;
1368 
1369 fail:
1370 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1371 }
1372 
1373 static void efx_ef10_ev_remove(struct efx_channel *channel)
1374 {
1375 	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
1376 }
1377 
1378 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
1379 					   unsigned int rx_queue_label)
1380 {
1381 	struct efx_nic *efx = rx_queue->efx;
1382 
1383 	netif_info(efx, hw, efx->net_dev,
1384 		   "rx event arrived on queue %d labeled as queue %u\n",
1385 		   efx_rx_queue_index(rx_queue), rx_queue_label);
1386 
1387 	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1388 }
1389 
1390 static void
1391 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
1392 			     unsigned int actual, unsigned int expected)
1393 {
1394 	unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
1395 	struct efx_nic *efx = rx_queue->efx;
1396 
1397 	netif_info(efx, hw, efx->net_dev,
1398 		   "dropped %d events (index=%d expected=%d)\n",
1399 		   dropped, actual, expected);
1400 
1401 	efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1402 }
1403 
1404 /* A partially received scattered packet was aborted; clean up. */
1405 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
1406 {
1407 	unsigned int rx_desc_ptr;
1408 
1409 	WARN_ON(rx_queue->scatter_n == 0);
1410 
1411 	netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
1412 		  "scattered RX aborted (dropping %u buffers)\n",
1413 		  rx_queue->scatter_n);
1414 
1415 	rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
1416 
1417 	efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
1418 		      0, EFX_RX_PKT_DISCARD);
1419 
1420 	rx_queue->removed_count += rx_queue->scatter_n;
1421 	rx_queue->scatter_n = 0;
1422 	rx_queue->scatter_len = 0;
1423 	++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
1424 }
1425 
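/* Handle an RX completion event.  EF10 event queues can carry two shapes
 * of completion: a continuation of a scattered packet (the descriptor
 * pointer advances by exactly one beyond scatter_n) or a merged completion
 * covering several whole single-descriptor packets.  Which case applies is
 * inferred from the low bits of the descriptor pointer in the event.
 */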
1426 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
1427 				    const efx_qword_t *event)
1428 {
1429 	unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
1430 	unsigned int n_descs, n_packets, i;
1431 	struct efx_nic *efx = channel->efx;
1432 	struct efx_rx_queue *rx_queue;
1433 	bool rx_cont;
1434 	u16 flags = 0;
1435 
1436 	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1437 		return 0;
1438 
1439 	/* Basic packet information */
1440 	rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
1441 	next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
1442 	rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
1443 	rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
1444 	rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
1445 
1446 	WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
1447 
1448 	rx_queue = efx_channel_get_rx_queue(channel);
1449 
1450 	if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
1451 		efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
1452 
1453 	n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
1454 		   ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1455 
1456 	if (n_descs != rx_queue->scatter_n + 1) {
1457 		/* detect rx abort */
1458 		if (unlikely(n_descs == rx_queue->scatter_n)) {
1459 			WARN_ON(rx_bytes != 0);
1460 			efx_ef10_handle_rx_abort(rx_queue);
1461 			return 0;
1462 		}
1463 
1464 		if (unlikely(rx_queue->scatter_n != 0)) {
1465 			/* Scattered packet completions cannot be
1466 			 * merged, so something has gone wrong.
1467 			 */
1468 			efx_ef10_handle_rx_bad_lbits(
1469 				rx_queue, next_ptr_lbits,
1470 				(rx_queue->removed_count +
1471 				 rx_queue->scatter_n + 1) &
1472 				((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
1473 			return 0;
1474 		}
1475 
1476 		/* Merged completion for multiple non-scattered packets */
1477 		rx_queue->scatter_n = 1;
1478 		rx_queue->scatter_len = 0;
1479 		n_packets = n_descs;
1480 		++channel->n_rx_merge_events;
1481 		channel->n_rx_merge_packets += n_packets;
1482 		flags |= EFX_RX_PKT_PREFIX_LEN;
1483 	} else {
1484 		++rx_queue->scatter_n;
1485 		rx_queue->scatter_len += rx_bytes;
1486 		if (rx_cont)
1487 			return 0;
1488 		n_packets = 1;
1489 	}
1490 
1491 	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
1492 		flags |= EFX_RX_PKT_DISCARD;
1493 
1494 	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
1495 		channel->n_rx_ip_hdr_chksum_err += n_packets;
1496 	} else if (unlikely(EFX_QWORD_FIELD(*event,
1497 					    ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
1498 		channel->n_rx_tcp_udp_chksum_err += n_packets;
1499 	} else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
1500 		   rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
1501 		flags |= EFX_RX_PKT_CSUMMED;
1502 	}
1503 
1504 	if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
1505 		flags |= EFX_RX_PKT_TCP;
1506 
1507 	channel->irq_mod_score += 2 * n_packets;
1508 
1509 	/* Handle received packet(s) */
1510 	for (i = 0; i < n_packets; i++) {
1511 		efx_rx_packet(rx_queue,
1512 			      rx_queue->removed_count & rx_queue->ptr_mask,
1513 			      rx_queue->scatter_n, rx_queue->scatter_len,
1514 			      flags);
1515 		rx_queue->removed_count += rx_queue->scatter_n;
1516 	}
1517 
1518 	rx_queue->scatter_n = 0;
1519 	rx_queue->scatter_len = 0;
1520 
1521 	return n_packets;
1522 }
1523 
1524 static int
1525 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
1526 {
1527 	struct efx_nic *efx = channel->efx;
1528 	struct efx_tx_queue *tx_queue;
1529 	unsigned int tx_ev_desc_ptr;
1530 	unsigned int tx_ev_q_label;
1531 	int tx_descs = 0;
1532 
1533 	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
1534 		return 0;
1535 
1536 	if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
1537 		return 0;
1538 
1539 	/* Transmit completion */
1540 	tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
1541 	tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
1542 	tx_queue = efx_channel_get_tx_queue(channel,
1543 					    tx_ev_q_label % EFX_TXQ_TYPES);
1544 	tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
1545 		    tx_queue->ptr_mask);
1546 	efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
1547 
1548 	return tx_descs;
1549 }
1550 
1551 static void
1552 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
1553 {
1554 	struct efx_nic *efx = channel->efx;
1555 	int subcode;
1556 
1557 	subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
1558 
1559 	switch (subcode) {
1560 	case ESE_DZ_DRV_TIMER_EV:
1561 	case ESE_DZ_DRV_WAKE_UP_EV:
1562 		break;
1563 	case ESE_DZ_DRV_START_UP_EV:
1564 		/* event queue init complete. ok. */
1565 		break;
1566 	default:
1567 		netif_err(efx, hw, efx->net_dev,
1568 			  "channel %d unknown driver event type %d"
1569 			  " (data " EFX_QWORD_FMT ")\n",
1570 			  channel->channel, subcode,
1571 			  EFX_QWORD_VAL(*event));
1572 
1573 	}
1574 }
1575 
1576 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
1577 						   efx_qword_t *event)
1578 {
1579 	struct efx_nic *efx = channel->efx;
1580 	u32 subcode;
1581 
1582 	subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
1583 
1584 	switch (subcode) {
1585 	case EFX_EF10_TEST:
1586 		channel->event_test_cpu = raw_smp_processor_id();
1587 		break;
1588 	case EFX_EF10_REFILL:
1589 		/* The queue must be empty, so we won't receive any RX
1590 		 * events and efx_process_channel() won't refill the
1591 		 * queue.  Refill it here.
1592 		 */
1593 		efx_fast_push_rx_descriptors(&channel->rx_queue);
1594 		break;
1595 	default:
1596 		netif_err(efx, hw, efx->net_dev,
1597 			  "channel %d unknown driver event type %u"
1598 			  " (data " EFX_QWORD_FMT ")\n",
1599 			  channel->channel, (unsigned) subcode,
1600 			  EFX_QWORD_VAL(*event));
1601 	}
1602 }
1603 
1604 static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
1605 {
1606 	struct efx_nic *efx = channel->efx;
1607 	efx_qword_t event, *p_event;
1608 	unsigned int read_ptr;
1609 	int ev_code;
1610 	int tx_descs = 0;
1611 	int spent = 0;
1612 
1613 	read_ptr = channel->eventq_read_ptr;
1614 
1615 	for (;;) {
1616 		p_event = efx_event(channel, read_ptr);
1617 		event = *p_event;
1618 
1619 		if (!efx_event_present(&event))
1620 			break;
1621 
1622 		EFX_SET_QWORD(*p_event);
1623 
1624 		++read_ptr;
1625 
1626 		ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
1627 
1628 		netif_vdbg(efx, drv, efx->net_dev,
1629 			   "processing event on %d " EFX_QWORD_FMT "\n",
1630 			   channel->channel, EFX_QWORD_VAL(event));
1631 
1632 		switch (ev_code) {
1633 		case ESE_DZ_EV_CODE_MCDI_EV:
1634 			efx_mcdi_process_event(channel, &event);
1635 			break;
1636 		case ESE_DZ_EV_CODE_RX_EV:
1637 			spent += efx_ef10_handle_rx_event(channel, &event);
1638 			if (spent >= quota) {
1639 				/* XXX can we split a merged event to
1640 				 * avoid going over-quota?
1641 				 */
1642 				spent = quota;
1643 				goto out;
1644 			}
1645 			break;
1646 		case ESE_DZ_EV_CODE_TX_EV:
1647 			tx_descs += efx_ef10_handle_tx_event(channel, &event);
1648 			if (tx_descs > efx->txq_entries) {
1649 				spent = quota;
1650 				goto out;
1651 			} else if (++spent == quota) {
1652 				goto out;
1653 			}
1654 			break;
1655 		case ESE_DZ_EV_CODE_DRIVER_EV:
1656 			efx_ef10_handle_driver_event(channel, &event);
1657 			if (++spent == quota)
1658 				goto out;
1659 			break;
1660 		case EFX_EF10_DRVGEN_EV:
1661 			efx_ef10_handle_driver_generated_event(channel, &event);
1662 			break;
1663 		default:
1664 			netif_err(efx, hw, efx->net_dev,
1665 				  "channel %d unknown event type %d"
1666 				  " (data " EFX_QWORD_FMT ")\n",
1667 				  channel->channel, ev_code,
1668 				  EFX_QWORD_VAL(event));
1669 		}
1670 	}
1671 
1672 out:
1673 	channel->eventq_read_ptr = read_ptr;
1674 	return spent;
1675 }
1676 
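/* Acknowledge processed events by writing the new read pointer back to the
 * hardware.  With workaround 35388 the pointer is too wide for the indirect
 * register field, so it is written in two halves (high bits first, then low
 * bits); otherwise a single write to EVQ_RPTR suffices.
 */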
1677 static void efx_ef10_ev_read_ack(struct efx_channel *channel)
1678 {
1679 	struct efx_nic *efx = channel->efx;
1680 	efx_dword_t rptr;
1681 
1682 	if (EFX_EF10_WORKAROUND_35388(efx)) {
1683 		BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
1684 			     (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
1685 		BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
1686 			     (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
1687 
1688 		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
1689 				     EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
1690 				     ERF_DD_EVQ_IND_RPTR,
1691 				     (channel->eventq_read_ptr &
1692 				      channel->eventq_mask) >>
1693 				     ERF_DD_EVQ_IND_RPTR_WIDTH);
1694 		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
1695 				channel->channel);
1696 		EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
1697 				     EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
1698 				     ERF_DD_EVQ_IND_RPTR,
1699 				     channel->eventq_read_ptr &
1700 				     ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
1701 		efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
1702 				channel->channel);
1703 	} else {
1704 		EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
1705 				     channel->eventq_read_ptr &
1706 				     channel->eventq_mask);
1707 		efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
1708 	}
1709 }
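
/* Editorial sketch (not part of the driver): under workaround 35388 the
 * function above writes the masked EVQ read pointer in two halves through
 * the indirect doorbell.  The split arithmetic, with 'width' standing in
 * for ERF_DD_EVQ_IND_RPTR_WIDTH (defined in ef10_regs.h):
 */
static void example_split_evq_rptr(unsigned int rptr, unsigned int evq_mask,
				   unsigned int width,
				   unsigned int *high, unsigned int *low)
{
	*high = (rptr & evq_mask) >> width;	/* written with FLAGS_HIGH */
	*low = rptr & ((1U << width) - 1);	/* written with FLAGS_LOW */
}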
1710 
1711 static void efx_ef10_ev_test_generate(struct efx_channel *channel)
1712 {
1713 	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
1714 	struct efx_nic *efx = channel->efx;
1715 	efx_qword_t event;
1716 	int rc;
1717 
1718 	EFX_POPULATE_QWORD_2(event,
1719 			     ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
1720 			     ESF_DZ_EV_DATA, EFX_EF10_TEST);
1721 
1722 	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
1723 
1724 	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
1725 	 * already swapped the data to little-endian order.
1726 	 */
1727 	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
1728 	       sizeof(efx_qword_t));
1729 
1730 	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
1731 			  NULL, 0, NULL);
1732 	if (rc != 0)
1733 		goto fail;
1734 
1735 	return;
1736 
1737 fail:
1738 	WARN_ON(true);
1739 	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
1740 }
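
/* Editorial note (not part of the driver): the test event above is built
 * host-side by EFX_POPULATE_QWORD_2(), which already stores the value in
 * little-endian order, so its bytes are memcpy()d into the MCDI request
 * verbatim.  Encoding it again with MCDI_SET_QWORD() would byte-swap a
 * second time on big-endian hosts and corrupt the event.
 */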
1741 
1742 void efx_ef10_handle_drain_event(struct efx_nic *efx)
1743 {
1744 	if (atomic_dec_and_test(&efx->active_queues))
1745 		wake_up(&efx->flush_wq);
1746 
1747 	WARN_ON(atomic_read(&efx->active_queues) < 0);
1748 }
1749 
1750 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
1751 {
1752 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1753 	struct efx_channel *channel;
1754 	struct efx_tx_queue *tx_queue;
1755 	struct efx_rx_queue *rx_queue;
1756 	int pending;
1757 
1758 	/* If the MC has just rebooted, the TX/RX queues will have already been
1759 	 * torn down, but efx->active_queues needs to be set to zero.
1760 	 */
1761 	if (nic_data->must_realloc_vis) {
1762 		atomic_set(&efx->active_queues, 0);
1763 		return 0;
1764 	}
1765 
1766 	/* Do not attempt to write to the NIC during EEH recovery */
1767 	if (efx->state != STATE_RECOVERY) {
1768 		efx_for_each_channel(channel, efx) {
1769 			efx_for_each_channel_rx_queue(rx_queue, channel)
1770 				efx_ef10_rx_fini(rx_queue);
1771 			efx_for_each_channel_tx_queue(tx_queue, channel)
1772 				efx_ef10_tx_fini(tx_queue);
1773 		}
1774 
1775 		wait_event_timeout(efx->flush_wq,
1776 				   atomic_read(&efx->active_queues) == 0,
1777 				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
1778 		pending = atomic_read(&efx->active_queues);
1779 		if (pending) {
1780 			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
1781 				  pending);
1782 			return -ETIMEDOUT;
1783 		}
1784 	}
1785 
1786 	return 0;
1787 }
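
/* Editorial sketch (not part of the driver): the flush in
 * efx_ef10_fini_dmaq() above is a counter-and-waitqueue handshake.  Each
 * TX/RX queue teardown leaves one pending drain; efx_ef10_handle_drain_event()
 * decrements efx->active_queues and wakes the waiter when it reaches zero.
 * A generic outline with hypothetical example_* globals (initialisation
 * omitted for brevity):
 */
static atomic_t example_active_queues;
static wait_queue_head_t example_flush_wq;

static void example_handle_drain_event(void)
{
	if (atomic_dec_and_test(&example_active_queues))
		wake_up(&example_flush_wq);
}

static int example_wait_for_flush(unsigned long timeout)
{
	wait_event_timeout(example_flush_wq,
			   atomic_read(&example_active_queues) == 0,
			   timeout);
	return atomic_read(&example_active_queues) ? -ETIMEDOUT : 0;
}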
1788 
1789 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
1790 				  const struct efx_filter_spec *right)
1791 {
1792 	if ((left->match_flags ^ right->match_flags) |
1793 	    ((left->flags ^ right->flags) &
1794 	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
1795 		return false;
1796 
1797 	return memcmp(&left->outer_vid, &right->outer_vid,
1798 		      sizeof(struct efx_filter_spec) -
1799 		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
1800 }
1801 
1802 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
1803 {
1804 	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
1805 	return jhash2((const u32 *)&spec->outer_vid,
1806 		      (sizeof(struct efx_filter_spec) -
1807 		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
1808 		      0);
1809 	/* XXX should we randomise the initval? */
1810 }
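
/* Editorial sketch (not part of the driver): efx_ef10_filter_equal() and
 * efx_ef10_filter_hash() above both treat everything from 'outer_vid' to
 * the end of struct efx_filter_spec as the match tuple, so two specs hash
 * and compare equal exactly when their match fields are identical.  The
 * BUILD_BUG_ON() checks the tuple starts on a 4-byte boundary so it can be
 * fed to jhash2() as 32-bit words.  A hypothetical layout to illustrate
 * the offset/length arithmetic:
 */
struct example_spec {
	unsigned int	flags;		/* not part of the hashed tuple   */
	unsigned short	outer_vid;	/* tuple starts here ...          */
	unsigned char	loc_mac[6];
	unsigned int	loc_host[4];	/* ... and runs to the end        */
};

#define EXAMPLE_TUPLE_LEN \
	(sizeof(struct example_spec) - offsetof(struct example_spec, outer_vid))
/* jhash2() would consume EXAMPLE_TUPLE_LEN / 4 32-bit words of this. */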
1811 
1812 /* Decide whether a filter should be exclusive or else should allow
1813  * delivery to additional recipients.  Currently we decide that
1814  * filters for specific local unicast MAC and IP addresses are
1815  * exclusive.
1816  */
1817 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
1818 {
1819 	if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
1820 	    !is_multicast_ether_addr(spec->loc_mac))
1821 		return true;
1822 
1823 	if ((spec->match_flags &
1824 	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
1825 	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
1826 		if (spec->ether_type == htons(ETH_P_IP) &&
1827 		    !ipv4_is_multicast(spec->loc_host[0]))
1828 			return true;
1829 		if (spec->ether_type == htons(ETH_P_IPV6) &&
1830 		    ((const u8 *)spec->loc_host)[0] != 0xff)
1831 			return true;
1832 	}
1833 
1834 	return false;
1835 }
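
/* Editorial note (not part of the driver): worked examples of the rule in
 * efx_ef10_filter_is_exclusive() above.
 *
 *   loc_mac 00:11:22:33:44:55 (unicast)            -> exclusive
 *   loc_mac 01:00:5e:01:02:03 (multicast)          -> shared
 *   ether_type IPv4, loc_host 192.0.2.1            -> exclusive
 *   ether_type IPv4, loc_host 239.1.2.3 (224/4)    -> shared
 *   ether_type IPv6, loc_host ff02::1 (ff00::/8)   -> shared
 */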
1836 
1837 static struct efx_filter_spec *
1838 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
1839 			   unsigned int filter_idx)
1840 {
1841 	return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
1842 					  ~EFX_EF10_FILTER_FLAGS);
1843 }
1844 
1845 static unsigned int
1846 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
1847 			    unsigned int filter_idx)
1848 {
1849 	return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
1850 }
1851 
1852 static void
1853 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
1854 			  unsigned int filter_idx,
1855 			  const struct efx_filter_spec *spec,
1856 			  unsigned int flags)
1857 {
1858 	table->entry[filter_idx].spec = (unsigned long)spec | flags;
1859 }
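
/* Editorial sketch (not part of the driver): the three accessors above pack
 * a kmalloc()ed spec pointer together with the two EFX_EF10_FILTER_FLAGS
 * bits in one unsigned long, relying on the allocation being at least
 * 4-byte aligned so bits 0-1 of the pointer are always clear.  A generic
 * version of the same packing:
 */
#define EXAMPLE_PTR_FLAGS	3UL	/* low two bits reserved for flags */

static unsigned long example_pack_entry(const void *spec, unsigned long flags)
{
	return (unsigned long)spec | (flags & EXAMPLE_PTR_FLAGS);
}

static void *example_entry_spec(unsigned long packed)
{
	return (void *)(packed & ~EXAMPLE_PTR_FLAGS);
}

static unsigned long example_entry_flags(unsigned long packed)
{
	return packed & EXAMPLE_PTR_FLAGS;
}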
1860 
1861 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
1862 				      const struct efx_filter_spec *spec,
1863 				      efx_dword_t *inbuf, u64 handle,
1864 				      bool replacing)
1865 {
1866 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
1867 
1868 	memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
1869 
1870 	if (replacing) {
1871 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1872 			       MC_CMD_FILTER_OP_IN_OP_REPLACE);
1873 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
1874 	} else {
1875 		u32 match_fields = 0;
1876 
1877 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
1878 			       efx_ef10_filter_is_exclusive(spec) ?
1879 			       MC_CMD_FILTER_OP_IN_OP_INSERT :
1880 			       MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
1881 
1882 		/* Convert match flags and values.  Unlike almost
1883 		 * everything else in MCDI, these fields are in
1884 		 * network byte order.
1885 		 */
1886 		if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
1887 			match_fields |=
1888 				is_multicast_ether_addr(spec->loc_mac) ?
1889 				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
1890 				1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
1891 #define COPY_FIELD(gen_flag, gen_field, mcdi_field)			     \
1892 		if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) {     \
1893 			match_fields |=					     \
1894 				1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	     \
1895 				mcdi_field ## _LBN;			     \
1896 			BUILD_BUG_ON(					     \
1897 				MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
1898 				sizeof(spec->gen_field));		     \
1899 			memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ##	mcdi_field), \
1900 			       &spec->gen_field, sizeof(spec->gen_field));   \
1901 		}
1902 		COPY_FIELD(REM_HOST, rem_host, SRC_IP);
1903 		COPY_FIELD(LOC_HOST, loc_host, DST_IP);
1904 		COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
1905 		COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
1906 		COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
1907 		COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
1908 		COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
1909 		COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
1910 		COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
1911 		COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
1912 #undef COPY_FIELD
1913 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
1914 			       match_fields);
1915 	}
1916 
1917 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1918 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
1919 		       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
1920 		       MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
1921 		       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
1922 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
1923 		       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
1924 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
1925 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
1926 		       (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
1927 		       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
1928 		       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
1929 	if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
1930 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
1931 			       spec->rss_context !=
1932 			       EFX_FILTER_RSS_CONTEXT_DEFAULT ?
1933 			       spec->rss_context : nic_data->rx_rss_context);
1934 }
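
/* Editorial note (not part of the driver): each COPY_FIELD() use in
 * efx_ef10_filter_push_prep() above both sets the corresponding MATCH bit
 * and copies the value, which the spec already holds in network byte
 * order, straight into the MCDI request.  Omitting the BUILD_BUG_ON()
 * size check, COPY_FIELD(LOC_PORT, loc_port, DST_PORT) expands to roughly:
 *
 *	if (spec->match_flags & EFX_FILTER_MATCH_LOC_PORT) {
 *		match_fields |= 1 << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN;
 *		memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_PORT),
 *		       &spec->loc_port, sizeof(spec->loc_port));
 *	}
 */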
1935 
1936 static int efx_ef10_filter_push(struct efx_nic *efx,
1937 				const struct efx_filter_spec *spec,
1938 				u64 *handle, bool replacing)
1939 {
1940 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
1941 	MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
1942 	int rc;
1943 
1944 	efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
1945 	rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
1946 			  outbuf, sizeof(outbuf), NULL);
1947 	if (rc == 0)
1948 		*handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
1949 	return rc;
1950 }
1951 
1952 static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
1953 					enum efx_filter_match_flags match_flags)
1954 {
1955 	unsigned int match_pri;
1956 
1957 	for (match_pri = 0;
1958 	     match_pri < table->rx_match_count;
1959 	     match_pri++)
1960 		if (table->rx_match_flags[match_pri] == match_flags)
1961 			return match_pri;
1962 
1963 	return -EPROTONOSUPPORT;
1964 }
1965 
1966 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
1967 				  struct efx_filter_spec *spec,
1968 				  bool replace_equal)
1969 {
1970 	struct efx_ef10_filter_table *table = efx->filter_state;
1971 	DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
1972 	struct efx_filter_spec *saved_spec;
1973 	unsigned int match_pri, hash;
1974 	unsigned int priv_flags;
1975 	bool replacing = false;
1976 	int ins_index = -1;
1977 	DEFINE_WAIT(wait);
1978 	bool is_mc_recip;
1979 	s32 rc;
1980 
1981 	/* For now, only support RX filters */
1982 	if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
1983 	    EFX_FILTER_FLAG_RX)
1984 		return -EINVAL;
1985 
1986 	rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
1987 	if (rc < 0)
1988 		return rc;
1989 	match_pri = rc;
1990 
1991 	hash = efx_ef10_filter_hash(spec);
1992 	is_mc_recip = efx_filter_is_mc_recipient(spec);
1993 	if (is_mc_recip)
1994 		bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
1995 
1996 	/* Find any existing filters with the same match tuple or
1997 	 * else a free slot to insert at.  If any of them are busy,
1998 	 * we have to wait and retry.
1999 	 */
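	/* Editorial note (not part of the driver): the nested loops below
	 * probe the software table linearly from the spec's hash bucket:
	 * slot (hash + 1) & (HUNT_FILTER_TBL_ROWS - 1), then (hash + 2) &
	 * ..., for at most EFX_EF10_FILTER_SEARCH_LIMIT probes.  Finding a
	 * BUSY entry with the same match tuple drops filter_lock, sleeps
	 * on table->waitq and restarts the whole search.
	 */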
2000 	for (;;) {
2001 		unsigned int depth = 1;
2002 		unsigned int i;
2003 
2004 		spin_lock_bh(&efx->filter_lock);
2005 
2006 		for (;;) {
2007 			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2008 			saved_spec = efx_ef10_filter_entry_spec(table, i);
2009 
2010 			if (!saved_spec) {
2011 				if (ins_index < 0)
2012 					ins_index = i;
2013 			} else if (efx_ef10_filter_equal(spec, saved_spec)) {
2014 				if (table->entry[i].spec &
2015 				    EFX_EF10_FILTER_FLAG_BUSY)
2016 					break;
2017 				if (spec->priority < saved_spec->priority &&
2018 				    !(saved_spec->priority ==
2019 				      EFX_FILTER_PRI_REQUIRED &&
2020 				      saved_spec->flags &
2021 				      EFX_FILTER_FLAG_RX_STACK)) {
2022 					rc = -EPERM;
2023 					goto out_unlock;
2024 				}
2025 				if (!is_mc_recip) {
2026 					/* This is the only one */
2027 					if (spec->priority ==
2028 					    saved_spec->priority &&
2029 					    !replace_equal) {
2030 						rc = -EEXIST;
2031 						goto out_unlock;
2032 					}
2033 					ins_index = i;
2034 					goto found;
2035 				} else if (spec->priority >
2036 					   saved_spec->priority ||
2037 					   (spec->priority ==
2038 					    saved_spec->priority &&
2039 					    replace_equal)) {
2040 					if (ins_index < 0)
2041 						ins_index = i;
2042 					else
2043 						__set_bit(depth, mc_rem_map);
2044 				}
2045 			}
2046 
2047 			/* Once we reach the maximum search depth, use
2048 			 * the first suitable slot or return -EBUSY if
2049 			 * there was none
2050 			 */
2051 			if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2052 				if (ins_index < 0) {
2053 					rc = -EBUSY;
2054 					goto out_unlock;
2055 				}
2056 				goto found;
2057 			}
2058 
2059 			++depth;
2060 		}
2061 
2062 		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2063 		spin_unlock_bh(&efx->filter_lock);
2064 		schedule();
2065 	}
2066 
2067 found:
2068 	/* Create a software table entry if necessary, and mark it
2069 	 * busy.  We might yet fail to insert, but any attempt to
2070 	 * insert a conflicting filter while we're waiting for the
2071 	 * firmware must find the busy entry.
2072 	 */
2073 	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2074 	if (saved_spec) {
2075 		if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
2076 			/* Just make sure it won't be removed */
2077 			saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
2078 			table->entry[ins_index].spec &=
2079 				~EFX_EF10_FILTER_FLAG_STACK_OLD;
2080 			rc = ins_index;
2081 			goto out_unlock;
2082 		}
2083 		replacing = true;
2084 		priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
2085 	} else {
2086 		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2087 		if (!saved_spec) {
2088 			rc = -ENOMEM;
2089 			goto out_unlock;
2090 		}
2091 		*saved_spec = *spec;
2092 		priv_flags = 0;
2093 	}
2094 	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2095 				  priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
2096 
2097 	/* Mark lower-priority multicast recipients busy prior to removal */
2098 	if (is_mc_recip) {
2099 		unsigned int depth, i;
2100 
2101 		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2102 			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2103 			if (test_bit(depth, mc_rem_map))
2104 				table->entry[i].spec |=
2105 					EFX_EF10_FILTER_FLAG_BUSY;
2106 		}
2107 	}
2108 
2109 	spin_unlock_bh(&efx->filter_lock);
2110 
2111 	rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
2112 				  replacing);
2113 
2114 	/* Finalise the software table entry */
2115 	spin_lock_bh(&efx->filter_lock);
2116 	if (rc == 0) {
2117 		if (replacing) {
2118 			/* Update the fields that may differ */
2119 			saved_spec->priority = spec->priority;
2120 			saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
2121 			saved_spec->flags |= spec->flags;
2122 			saved_spec->rss_context = spec->rss_context;
2123 			saved_spec->dmaq_id = spec->dmaq_id;
2124 		}
2125 	} else if (!replacing) {
2126 		kfree(saved_spec);
2127 		saved_spec = NULL;
2128 	}
2129 	efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
2130 
2131 	/* Remove and finalise entries for lower-priority multicast
2132 	 * recipients
2133 	 */
2134 	if (is_mc_recip) {
2135 		MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2136 		unsigned int depth, i;
2137 
2138 		memset(inbuf, 0, sizeof(inbuf));
2139 
2140 		for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
2141 			if (!test_bit(depth, mc_rem_map))
2142 				continue;
2143 
2144 			i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2145 			saved_spec = efx_ef10_filter_entry_spec(table, i);
2146 			priv_flags = efx_ef10_filter_entry_flags(table, i);
2147 
2148 			if (rc == 0) {
2149 				spin_unlock_bh(&efx->filter_lock);
2150 				MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2151 					       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2152 				MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2153 					       table->entry[i].handle);
2154 				rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2155 						  inbuf, sizeof(inbuf),
2156 						  NULL, 0, NULL);
2157 				spin_lock_bh(&efx->filter_lock);
2158 			}
2159 
2160 			if (rc == 0) {
2161 				kfree(saved_spec);
2162 				saved_spec = NULL;
2163 				priv_flags = 0;
2164 			} else {
2165 				priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
2166 			}
2167 			efx_ef10_filter_set_entry(table, i, saved_spec,
2168 						  priv_flags);
2169 		}
2170 	}
2171 
2172 	/* If successful, return the inserted filter ID */
2173 	if (rc == 0)
2174 		rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
2175 
2176 	wake_up_all(&table->waitq);
2177 out_unlock:
2178 	spin_unlock_bh(&efx->filter_lock);
2179 	finish_wait(&table->waitq, &wait);
2180 	return rc;
2181 }
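
/* Editorial sketch (not part of the driver): the ID returned by
 * efx_ef10_filter_insert() above encodes both the match priority and the
 * table row; efx_ef10_filter_remove_internal() and _get_safe() below undo
 * it with % and /.  With HUNT_FILTER_TBL_ROWS == 8192, match_pri 2 and
 * ins_index 100 give id = 2 * 8192 + 100 = 16484, which decodes back to
 * row 100, priority 2.
 */
static u32 example_encode_filter_id(unsigned int match_pri, unsigned int index)
{
	return match_pri * HUNT_FILTER_TBL_ROWS + index;
}

static void example_decode_filter_id(u32 id, unsigned int *match_pri,
				     unsigned int *index)
{
	*index = id % HUNT_FILTER_TBL_ROWS;
	*match_pri = id / HUNT_FILTER_TBL_ROWS;
}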
2182 
2183 void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
2184 {
2185 	/* no need to do anything here on EF10 */
2186 }
2187 
2188 /* Remove a filter.
2189  * If !stack_requested, remove by ID
2190  * If stack_requested, remove by index
2191  * Filter ID may come from userland and must be range-checked.
2192  */
2193 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
2194 					   enum efx_filter_priority priority,
2195 					   u32 filter_id, bool stack_requested)
2196 {
2197 	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2198 	struct efx_ef10_filter_table *table = efx->filter_state;
2199 	MCDI_DECLARE_BUF(inbuf,
2200 			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2201 			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2202 	struct efx_filter_spec *spec;
2203 	DEFINE_WAIT(wait);
2204 	int rc;
2205 
2206 	/* Find the software table entry and mark it busy.  Don't
2207 	 * remove it yet; any attempt to update while we're waiting
2208 	 * for the firmware must find the busy entry.
2209 	 */
2210 	for (;;) {
2211 		spin_lock_bh(&efx->filter_lock);
2212 		if (!(table->entry[filter_idx].spec &
2213 		      EFX_EF10_FILTER_FLAG_BUSY))
2214 			break;
2215 		prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
2216 		spin_unlock_bh(&efx->filter_lock);
2217 		schedule();
2218 	}
2219 	spec = efx_ef10_filter_entry_spec(table, filter_idx);
2220 	if (!spec || spec->priority > priority ||
2221 	    (!stack_requested &&
2222 	     efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
2223 	     filter_id / HUNT_FILTER_TBL_ROWS)) {
2224 		rc = -ENOENT;
2225 		goto out_unlock;
2226 	}
2227 	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2228 	spin_unlock_bh(&efx->filter_lock);
2229 
2230 	if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
2231 		/* Reset steering of a stack-owned filter */
2232 
2233 		struct efx_filter_spec new_spec = *spec;
2234 
2235 		new_spec.priority = EFX_FILTER_PRI_REQUIRED;
2236 		new_spec.flags = (EFX_FILTER_FLAG_RX |
2237 				  EFX_FILTER_FLAG_RX_RSS |
2238 				  EFX_FILTER_FLAG_RX_STACK);
2239 		new_spec.dmaq_id = 0;
2240 		new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
2241 		rc = efx_ef10_filter_push(efx, &new_spec,
2242 					  &table->entry[filter_idx].handle,
2243 					  true);
2244 
2245 		spin_lock_bh(&efx->filter_lock);
2246 		if (rc == 0)
2247 			*spec = new_spec;
2248 	} else {
2249 		/* Really remove the filter */
2250 
2251 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2252 			       efx_ef10_filter_is_exclusive(spec) ?
2253 			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
2254 			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2255 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2256 			       table->entry[filter_idx].handle);
2257 		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
2258 				  inbuf, sizeof(inbuf), NULL, 0, NULL);
2259 
2260 		spin_lock_bh(&efx->filter_lock);
2261 		if (rc == 0) {
2262 			kfree(spec);
2263 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2264 		}
2265 	}
2266 	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2267 	wake_up_all(&table->waitq);
2268 out_unlock:
2269 	spin_unlock_bh(&efx->filter_lock);
2270 	finish_wait(&table->waitq, &wait);
2271 	return rc;
2272 }
2273 
2274 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
2275 				       enum efx_filter_priority priority,
2276 				       u32 filter_id)
2277 {
2278 	return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
2279 }
2280 
2281 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
2282 				    enum efx_filter_priority priority,
2283 				    u32 filter_id, struct efx_filter_spec *spec)
2284 {
2285 	unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
2286 	struct efx_ef10_filter_table *table = efx->filter_state;
2287 	const struct efx_filter_spec *saved_spec;
2288 	int rc;
2289 
2290 	spin_lock_bh(&efx->filter_lock);
2291 	saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
2292 	if (saved_spec && saved_spec->priority == priority &&
2293 	    efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
2294 	    filter_id / HUNT_FILTER_TBL_ROWS) {
2295 		*spec = *saved_spec;
2296 		rc = 0;
2297 	} else {
2298 		rc = -ENOENT;
2299 	}
2300 	spin_unlock_bh(&efx->filter_lock);
2301 	return rc;
2302 }
2303 
2304 static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
2305 				     enum efx_filter_priority priority)
2306 {
2307 	/* TODO */
2308 }
2309 
2310 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
2311 					 enum efx_filter_priority priority)
2312 {
2313 	struct efx_ef10_filter_table *table = efx->filter_state;
2314 	unsigned int filter_idx;
2315 	s32 count = 0;
2316 
2317 	spin_lock_bh(&efx->filter_lock);
2318 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2319 		if (table->entry[filter_idx].spec &&
2320 		    efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
2321 		    priority)
2322 			++count;
2323 	}
2324 	spin_unlock_bh(&efx->filter_lock);
2325 	return count;
2326 }
2327 
2328 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
2329 {
2330 	struct efx_ef10_filter_table *table = efx->filter_state;
2331 
2332 	return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
2333 }
2334 
2335 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
2336 				      enum efx_filter_priority priority,
2337 				      u32 *buf, u32 size)
2338 {
2339 	struct efx_ef10_filter_table *table = efx->filter_state;
2340 	struct efx_filter_spec *spec;
2341 	unsigned int filter_idx;
2342 	s32 count = 0;
2343 
2344 	spin_lock_bh(&efx->filter_lock);
2345 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2346 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
2347 		if (spec && spec->priority == priority) {
2348 			if (count == size) {
2349 				count = -EMSGSIZE;
2350 				break;
2351 			}
2352 			buf[count++] = (efx_ef10_filter_rx_match_pri(
2353 						table, spec->match_flags) *
2354 					HUNT_FILTER_TBL_ROWS +
2355 					filter_idx);
2356 		}
2357 	}
2358 	spin_unlock_bh(&efx->filter_lock);
2359 	return count;
2360 }
2361 
2362 #ifdef CONFIG_RFS_ACCEL
2363 
2364 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
2365 
2366 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
2367 				      struct efx_filter_spec *spec)
2368 {
2369 	struct efx_ef10_filter_table *table = efx->filter_state;
2370 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2371 	struct efx_filter_spec *saved_spec;
2372 	unsigned int hash, i, depth = 1;
2373 	bool replacing = false;
2374 	int ins_index = -1;
2375 	u64 cookie;
2376 	s32 rc;
2377 
2378 	/* Must be an RX filter without RSS and not for a multicast
2379 	 * destination address (RFS only works for connected sockets).
2380 	 * These restrictions allow us to pass only a tiny amount of
2381 	 * data through to the completion function.
2382 	 */
2383 	EFX_WARN_ON_PARANOID(spec->flags !=
2384 			     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
2385 	EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
2386 	EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
2387 
2388 	hash = efx_ef10_filter_hash(spec);
2389 
2390 	spin_lock_bh(&efx->filter_lock);
2391 
2392 	/* Find any existing filter with the same match tuple or else
2393 	 * a free slot to insert at.  If an existing filter is busy,
2394 	 * we have to give up.
2395 	 */
2396 	for (;;) {
2397 		i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
2398 		saved_spec = efx_ef10_filter_entry_spec(table, i);
2399 
2400 		if (!saved_spec) {
2401 			if (ins_index < 0)
2402 				ins_index = i;
2403 		} else if (efx_ef10_filter_equal(spec, saved_spec)) {
2404 			if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
2405 				rc = -EBUSY;
2406 				goto fail_unlock;
2407 			}
2408 			EFX_WARN_ON_PARANOID(saved_spec->flags &
2409 					     EFX_FILTER_FLAG_RX_STACK);
2410 			if (spec->priority < saved_spec->priority) {
2411 				rc = -EPERM;
2412 				goto fail_unlock;
2413 			}
2414 			ins_index = i;
2415 			break;
2416 		}
2417 
2418 		/* Once we reach the maximum search depth, use the
2419 		 * first suitable slot or return -EBUSY if there was
2420 		 * none
2421 		 */
2422 		if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
2423 			if (ins_index < 0) {
2424 				rc = -EBUSY;
2425 				goto fail_unlock;
2426 			}
2427 			break;
2428 		}
2429 
2430 		++depth;
2431 	}
2432 
2433 	/* Create a software table entry if necessary, and mark it
2434 	 * busy.  We might yet fail to insert, but any attempt to
2435 	 * insert a conflicting filter while we're waiting for the
2436 	 * firmware must find the busy entry.
2437 	 */
2438 	saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
2439 	if (saved_spec) {
2440 		replacing = true;
2441 	} else {
2442 		saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
2443 		if (!saved_spec) {
2444 			rc = -ENOMEM;
2445 			goto fail_unlock;
2446 		}
2447 		*saved_spec = *spec;
2448 	}
2449 	efx_ef10_filter_set_entry(table, ins_index, saved_spec,
2450 				  EFX_EF10_FILTER_FLAG_BUSY);
2451 
2452 	spin_unlock_bh(&efx->filter_lock);
2453 
2454 	/* Pack up the variables needed on completion */
2455 	cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
2456 
2457 	efx_ef10_filter_push_prep(efx, spec, inbuf,
2458 				  table->entry[ins_index].handle, replacing);
2459 	efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2460 			   MC_CMD_FILTER_OP_OUT_LEN,
2461 			   efx_ef10_filter_rfs_insert_complete, cookie);
2462 
2463 	return ins_index;
2464 
2465 fail_unlock:
2466 	spin_unlock_bh(&efx->filter_lock);
2467 	return rc;
2468 }
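
/* Editorial sketch (not part of the driver): the 'cookie' passed to the
 * asynchronous MCDI completion above packs everything the completer needs
 * into one word; efx_ef10_filter_rfs_insert_complete() below unpacks it
 * the same way.
 */
static unsigned long example_pack_rfs_cookie(bool replacing,
					     unsigned int ins_index,
					     unsigned int dmaq_id)
{
	return (unsigned long)replacing << 31 | ins_index << 16 | dmaq_id;
}

static void example_unpack_rfs_cookie(unsigned long cookie, bool *replacing,
				      unsigned int *ins_index,
				      unsigned int *dmaq_id)
{
	*replacing = cookie >> 31;
	*ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
	*dmaq_id = cookie & 0xffff;
}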
2469 
2470 static void
2471 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
2472 				    int rc, efx_dword_t *outbuf,
2473 				    size_t outlen_actual)
2474 {
2475 	struct efx_ef10_filter_table *table = efx->filter_state;
2476 	unsigned int ins_index, dmaq_id;
2477 	struct efx_filter_spec *spec;
2478 	bool replacing;
2479 
2480 	/* Unpack the cookie */
2481 	replacing = cookie >> 31;
2482 	ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
2483 	dmaq_id = cookie & 0xffff;
2484 
2485 	spin_lock_bh(&efx->filter_lock);
2486 	spec = efx_ef10_filter_entry_spec(table, ins_index);
2487 	if (rc == 0) {
2488 		table->entry[ins_index].handle =
2489 			MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
2490 		if (replacing)
2491 			spec->dmaq_id = dmaq_id;
2492 	} else if (!replacing) {
2493 		kfree(spec);
2494 		spec = NULL;
2495 	}
2496 	efx_ef10_filter_set_entry(table, ins_index, spec, 0);
2497 	spin_unlock_bh(&efx->filter_lock);
2498 
2499 	wake_up_all(&table->waitq);
2500 }
2501 
2502 static void
2503 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2504 				    unsigned long filter_idx,
2505 				    int rc, efx_dword_t *outbuf,
2506 				    size_t outlen_actual);
2507 
2508 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
2509 					   unsigned int filter_idx)
2510 {
2511 	struct efx_ef10_filter_table *table = efx->filter_state;
2512 	struct efx_filter_spec *spec =
2513 		efx_ef10_filter_entry_spec(table, filter_idx);
2514 	MCDI_DECLARE_BUF(inbuf,
2515 			 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
2516 			 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
2517 
2518 	if (!spec ||
2519 	    (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
2520 	    spec->priority != EFX_FILTER_PRI_HINT ||
2521 	    !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
2522 				 flow_id, filter_idx))
2523 		return false;
2524 
2525 	MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2526 		       MC_CMD_FILTER_OP_IN_OP_REMOVE);
2527 	MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2528 		       table->entry[filter_idx].handle);
2529 	if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
2530 			       efx_ef10_filter_rfs_expire_complete, filter_idx))
2531 		return false;
2532 
2533 	table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2534 	return true;
2535 }
2536 
2537 static void
2538 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
2539 				    unsigned long filter_idx,
2540 				    int rc, efx_dword_t *outbuf,
2541 				    size_t outlen_actual)
2542 {
2543 	struct efx_ef10_filter_table *table = efx->filter_state;
2544 	struct efx_filter_spec *spec =
2545 		efx_ef10_filter_entry_spec(table, filter_idx);
2546 
2547 	spin_lock_bh(&efx->filter_lock);
2548 	if (rc == 0) {
2549 		kfree(spec);
2550 		efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2551 	}
2552 	table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
2553 	wake_up_all(&table->waitq);
2554 	spin_unlock_bh(&efx->filter_lock);
2555 }
2556 
2557 #endif /* CONFIG_RFS_ACCEL */
2558 
2559 static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
2560 {
2561 	int match_flags = 0;
2562 
2563 #define MAP_FLAG(gen_flag, mcdi_field) {				\
2564 		u32 old_mcdi_flags = mcdi_flags;			\
2565 		mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ##	\
2566 				mcdi_field ## _LBN);			\
2567 		if (mcdi_flags != old_mcdi_flags)			\
2568 			match_flags |= EFX_FILTER_MATCH_ ## gen_flag;	\
2569 	}
2570 	MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
2571 	MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
2572 	MAP_FLAG(REM_HOST, SRC_IP);
2573 	MAP_FLAG(LOC_HOST, DST_IP);
2574 	MAP_FLAG(REM_MAC, SRC_MAC);
2575 	MAP_FLAG(REM_PORT, SRC_PORT);
2576 	MAP_FLAG(LOC_MAC, DST_MAC);
2577 	MAP_FLAG(LOC_PORT, DST_PORT);
2578 	MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
2579 	MAP_FLAG(INNER_VID, INNER_VLAN);
2580 	MAP_FLAG(OUTER_VID, OUTER_VLAN);
2581 	MAP_FLAG(IP_PROTO, IP_PROTO);
2582 #undef MAP_FLAG
2583 
2584 	/* Did we map them all? */
2585 	if (mcdi_flags)
2586 		return -EINVAL;
2587 
2588 	return match_flags;
2589 }
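
/* Editorial note (not part of the driver): each MAP_FLAG() above clears one
 * recognised bit from mcdi_flags and sets the matching driver flag, so any
 * bit still set at the end is a match type this driver cannot express and
 * the whole combination is rejected with -EINVAL.  For example, an
 * mcdi_flags value with only the DST_IP and DST_PORT bits set maps to
 * EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT and leaves
 * mcdi_flags == 0, so it is accepted.
 */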
2590 
2591 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
2592 {
2593 	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
2594 	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
2595 	unsigned int pd_match_pri, pd_match_count;
2596 	struct efx_ef10_filter_table *table;
2597 	size_t outlen;
2598 	int rc;
2599 
2600 	table = kzalloc(sizeof(*table), GFP_KERNEL);
2601 	if (!table)
2602 		return -ENOMEM;
2603 
2604 	/* Find out which RX filter types are supported, and their priorities */
2605 	MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
2606 		       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
2607 	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
2608 			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
2609 			  &outlen);
2610 	if (rc)
2611 		goto fail;
2612 	pd_match_count = MCDI_VAR_ARRAY_LEN(
2613 		outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
2614 	table->rx_match_count = 0;
2615 
2616 	for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
2617 		u32 mcdi_flags =
2618 			MCDI_ARRAY_DWORD(
2619 				outbuf,
2620 				GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
2621 				pd_match_pri);
2622 		rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
2623 		if (rc < 0) {
2624 			netif_dbg(efx, probe, efx->net_dev,
2625 				  "%s: fw flags %#x pri %u not supported in driver\n",
2626 				  __func__, mcdi_flags, pd_match_pri);
2627 		} else {
2628 			netif_dbg(efx, probe, efx->net_dev,
2629 				  "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
2630 				  __func__, mcdi_flags, pd_match_pri,
2631 				  rc, table->rx_match_count);
2632 			table->rx_match_flags[table->rx_match_count++] = rc;
2633 		}
2634 	}
2635 
2636 	table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
2637 	if (!table->entry) {
2638 		rc = -ENOMEM;
2639 		goto fail;
2640 	}
2641 
2642 	efx->filter_state = table;
2643 	init_waitqueue_head(&table->waitq);
2644 	return 0;
2645 
2646 fail:
2647 	kfree(table);
2648 	return rc;
2649 }
2650 
2651 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
2652 {
2653 	struct efx_ef10_filter_table *table = efx->filter_state;
2654 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
2655 	struct efx_filter_spec *spec;
2656 	unsigned int filter_idx;
2657 	bool failed = false;
2658 	int rc;
2659 
2660 	if (!nic_data->must_restore_filters)
2661 		return;
2662 
2663 	spin_lock_bh(&efx->filter_lock);
2664 
2665 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2666 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
2667 		if (!spec)
2668 			continue;
2669 
2670 		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
2671 		spin_unlock_bh(&efx->filter_lock);
2672 
2673 		rc = efx_ef10_filter_push(efx, spec,
2674 					  &table->entry[filter_idx].handle,
2675 					  false);
2676 		if (rc)
2677 			failed = true;
2678 
2679 		spin_lock_bh(&efx->filter_lock);
2680 		if (rc) {
2681 			kfree(spec);
2682 			efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
2683 		} else {
2684 			table->entry[filter_idx].spec &=
2685 				~EFX_EF10_FILTER_FLAG_BUSY;
2686 		}
2687 	}
2688 
2689 	spin_unlock_bh(&efx->filter_lock);
2690 
2691 	if (failed)
2692 		netif_err(efx, hw, efx->net_dev,
2693 			  "unable to restore all filters\n");
2694 	else
2695 		nic_data->must_restore_filters = false;
2696 }
2697 
2698 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
2699 {
2700 	struct efx_ef10_filter_table *table = efx->filter_state;
2701 	MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
2702 	struct efx_filter_spec *spec;
2703 	unsigned int filter_idx;
2704 	int rc;
2705 
2706 	for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
2707 		spec = efx_ef10_filter_entry_spec(table, filter_idx);
2708 		if (!spec)
2709 			continue;
2710 
2711 		MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2712 			       efx_ef10_filter_is_exclusive(spec) ?
2713 			       MC_CMD_FILTER_OP_IN_OP_REMOVE :
2714 			       MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
2715 		MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
2716 			       table->entry[filter_idx].handle);
2717 		rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
2718 				  NULL, 0, NULL);
2719 
2720 		WARN_ON(rc != 0);
2721 		kfree(spec);
2722 	}
2723 
2724 	vfree(table->entry);
2725 	kfree(table);
2726 }
2727 
2728 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
2729 {
2730 	struct efx_ef10_filter_table *table = efx->filter_state;
2731 	struct net_device *net_dev = efx->net_dev;
2732 	struct efx_filter_spec spec;
2733 	bool remove_failed = false;
2734 	struct netdev_hw_addr *uc;
2735 	struct netdev_hw_addr *mc;
2736 	unsigned int filter_idx;
2737 	int i, n, rc;
2738 
2739 	if (!efx_dev_registered(efx))
2740 		return;
2741 
2742 	/* Mark old filters that may need to be removed */
2743 	spin_lock_bh(&efx->filter_lock);
2744 	n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
2745 	for (i = 0; i < n; i++) {
2746 		filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
2747 		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2748 	}
2749 	n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
2750 	for (i = 0; i < n; i++) {
2751 		filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
2752 		table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
2753 	}
2754 	spin_unlock_bh(&efx->filter_lock);
2755 
2756 	/* Copy/convert the address lists; add the primary station
2757 	 * address and broadcast address
2758 	 */
2759 	netif_addr_lock_bh(net_dev);
2760 	if (net_dev->flags & IFF_PROMISC ||
2761 	    netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
2762 		table->stack_uc_count = -1;
2763 	} else {
2764 		table->stack_uc_count = 1 + netdev_uc_count(net_dev);
2765 		memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
2766 		       ETH_ALEN);
2767 		i = 1;
2768 		netdev_for_each_uc_addr(uc, net_dev) {
2769 			memcpy(table->stack_uc_list[i].addr,
2770 			       uc->addr, ETH_ALEN);
2771 			i++;
2772 		}
2773 	}
2774 	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
2775 	    netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
2776 		table->stack_mc_count = -1;
2777 	} else {
2778 		table->stack_mc_count = 1 + netdev_mc_count(net_dev);
2779 		eth_broadcast_addr(table->stack_mc_list[0].addr);
2780 		i = 1;
2781 		netdev_for_each_mc_addr(mc, net_dev) {
2782 			memcpy(table->stack_mc_list[i].addr,
2783 			       mc->addr, ETH_ALEN);
2784 			i++;
2785 		}
2786 	}
2787 	netif_addr_unlock_bh(net_dev);
2788 
2789 	/* Insert/renew unicast filters */
2790 	if (table->stack_uc_count >= 0) {
2791 		for (i = 0; i < table->stack_uc_count; i++) {
2792 			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2793 					   EFX_FILTER_FLAG_RX_RSS |
2794 					   EFX_FILTER_FLAG_RX_STACK,
2795 					   0);
2796 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2797 						 table->stack_uc_list[i].addr);
2798 			rc = efx_ef10_filter_insert(efx, &spec, true);
2799 			if (rc < 0) {
2800 				/* Fall back to unicast-promisc */
2801 				while (i--)
2802 					efx_ef10_filter_remove_safe(
2803 						efx, EFX_FILTER_PRI_REQUIRED,
2804 						table->stack_uc_list[i].id);
2805 				table->stack_uc_count = -1;
2806 				break;
2807 			}
2808 			table->stack_uc_list[i].id = rc;
2809 		}
2810 	}
2811 	if (table->stack_uc_count < 0) {
2812 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2813 				   EFX_FILTER_FLAG_RX_RSS |
2814 				   EFX_FILTER_FLAG_RX_STACK,
2815 				   0);
2816 		efx_filter_set_uc_def(&spec);
2817 		rc = efx_ef10_filter_insert(efx, &spec, true);
2818 		if (rc < 0) {
2819 			WARN_ON(1);
2820 			table->stack_uc_count = 0;
2821 		} else {
2822 			table->stack_uc_list[0].id = rc;
2823 		}
2824 	}
2825 
2826 	/* Insert/renew multicast filters */
2827 	if (table->stack_mc_count >= 0) {
2828 		for (i = 0; i < table->stack_mc_count; i++) {
2829 			efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2830 					   EFX_FILTER_FLAG_RX_RSS |
2831 					   EFX_FILTER_FLAG_RX_STACK,
2832 					   0);
2833 			efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
2834 						 table->stack_mc_list[i].addr);
2835 			rc = efx_ef10_filter_insert(efx, &spec, true);
2836 			if (rc < 0) {
2837 				/* Fall back to multicast-promisc */
2838 				while (i--)
2839 					efx_ef10_filter_remove_safe(
2840 						efx, EFX_FILTER_PRI_REQUIRED,
2841 						table->stack_mc_list[i].id);
2842 				table->stack_mc_count = -1;
2843 				break;
2844 			}
2845 			table->stack_mc_list[i].id = rc;
2846 		}
2847 	}
2848 	if (table->stack_mc_count < 0) {
2849 		efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
2850 				   EFX_FILTER_FLAG_RX_RSS |
2851 				   EFX_FILTER_FLAG_RX_STACK,
2852 				   0);
2853 		efx_filter_set_mc_def(&spec);
2854 		rc = efx_ef10_filter_insert(efx, &spec, true);
2855 		if (rc < 0) {
2856 			WARN_ON(1);
2857 			table->stack_mc_count = 0;
2858 		} else {
2859 			table->stack_mc_list[0].id = rc;
2860 		}
2861 	}
2862 
2863 	/* Remove filters that weren't renewed.  Since nothing else
2864 	 * changes the STACK_OLD flag or removes these filters, we
2865 	 * don't need to hold the filter_lock while scanning for
2866 	 * these filters.
2867 	 */
2868 	for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
2869 		if (ACCESS_ONCE(table->entry[i].spec) &
2870 		    EFX_EF10_FILTER_FLAG_STACK_OLD) {
2871 			if (efx_ef10_filter_remove_internal(efx,
2872 					EFX_FILTER_PRI_REQUIRED,
2873 					i, true) < 0)
2874 				remove_failed = true;
2875 		}
2876 	}
2877 	WARN_ON(remove_failed);
2878 }
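
/* Editorial note (not part of the driver): efx_ef10_filter_sync_rx_mode()
 * above is a mark-and-sweep update of the stack-owned filters:
 *
 *	1. mark every currently installed stack filter STACK_OLD;
 *	2. re-insert filters for the fresh unicast/multicast address lists
 *	   (or the promiscuous defaults) -- efx_ef10_filter_insert() clears
 *	   STACK_OLD on any entry it renews;
 *	3. sweep: remove every entry still marked STACK_OLD.
 *
 * Addresses that remain in the lists therefore keep their filters
 * installed throughout the update.
 */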
2879 
2880 static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
2881 {
2882 	efx_ef10_filter_sync_rx_mode(efx);
2883 
2884 	return efx_mcdi_set_mac(efx);
2885 }
2886 
2887 #ifdef CONFIG_SFC_MTD
2888 
2889 struct efx_ef10_nvram_type_info {
2890 	u16 type, type_mask;
2891 	u8 port;
2892 	const char *name;
2893 };
2894 
2895 static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
2896 	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE,	   0,    0, "sfc_mcfw" },
2897 	{ NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0,    0, "sfc_mcfw_backup" },
2898 	{ NVRAM_PARTITION_TYPE_EXPANSION_ROM,	   0,    0, "sfc_exp_rom" },
2899 	{ NVRAM_PARTITION_TYPE_STATIC_CONFIG,	   0,    0, "sfc_static_cfg" },
2900 	{ NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG,	   0,    0, "sfc_dynamic_cfg" },
2901 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0,   0, "sfc_exp_rom_cfg" },
2902 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0,   1, "sfc_exp_rom_cfg" },
2903 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0,   2, "sfc_exp_rom_cfg" },
2904 	{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0,   3, "sfc_exp_rom_cfg" },
2905 	{ NVRAM_PARTITION_TYPE_PHY_MIN,		   0xff, 0, "sfc_phy_fw" },
2906 };
2907 
2908 static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
2909 					struct efx_mcdi_mtd_partition *part,
2910 					unsigned int type)
2911 {
2912 	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
2913 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
2914 	const struct efx_ef10_nvram_type_info *info;
2915 	size_t size, erase_size, outlen;
2916 	bool protected;
2917 	int rc;
2918 
2919 	for (info = efx_ef10_nvram_types; ; info++) {
2920 		if (info ==
2921 		    efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
2922 			return -ENODEV;
2923 		if ((type & ~info->type_mask) == info->type)
2924 			break;
2925 	}
2926 	if (info->port != efx_port_num(efx))
2927 		return -ENODEV;
2928 
2929 	rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
2930 	if (rc)
2931 		return rc;
2932 	if (protected)
2933 		return -ENODEV; /* hide it */
2934 
2935 	part->nvram_type = type;
2936 
2937 	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
2938 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
2939 			  outbuf, sizeof(outbuf), &outlen);
2940 	if (rc)
2941 		return rc;
2942 	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
2943 		return -EIO;
2944 	if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
2945 	    (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
2946 		part->fw_subtype = MCDI_DWORD(outbuf,
2947 					      NVRAM_METADATA_OUT_SUBTYPE);
2948 
2949 	part->common.dev_type_name = "EF10 NVRAM manager";
2950 	part->common.type_name = info->name;
2951 
2952 	part->common.mtd.type = MTD_NORFLASH;
2953 	part->common.mtd.flags = MTD_CAP_NORFLASH;
2954 	part->common.mtd.size = size;
2955 	part->common.mtd.erasesize = erase_size;
2956 
2957 	return 0;
2958 }
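
/* Editorial note (not part of the driver): the table lookup above matches a
 * partition type with (type & ~info->type_mask) == info->type.  A zero mask
 * therefore requires an exact match, while the NVRAM_PARTITION_TYPE_PHY_MIN
 * entry with mask 0xff accepts any type that differs from PHY_MIN only in
 * its low byte, i.e. up to 256 per-PHY partition types, all exposed under
 * the single "sfc_phy_fw" name.  Partitions for the wrong port, and
 * write-protected partitions, are hidden by returning -ENODEV.
 */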
2959 
2960 static int efx_ef10_mtd_probe(struct efx_nic *efx)
2961 {
2962 	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
2963 	struct efx_mcdi_mtd_partition *parts;
2964 	size_t outlen, n_parts_total, i, n_parts;
2965 	unsigned int type;
2966 	int rc;
2967 
2968 	ASSERT_RTNL();
2969 
2970 	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
2971 	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
2972 			  outbuf, sizeof(outbuf), &outlen);
2973 	if (rc)
2974 		return rc;
2975 	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
2976 		return -EIO;
2977 
2978 	n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
2979 	if (n_parts_total >
2980 	    MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
2981 		return -EIO;
2982 
2983 	parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
2984 	if (!parts)
2985 		return -ENOMEM;
2986 
2987 	n_parts = 0;
2988 	for (i = 0; i < n_parts_total; i++) {
2989 		type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
2990 					i);
2991 		rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
2992 		if (rc == 0)
2993 			n_parts++;
2994 		else if (rc != -ENODEV)
2995 			goto fail;
2996 	}
2997 
2998 	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
2999 fail:
3000 	if (rc)
3001 		kfree(parts);
3002 	return rc;
3003 }
3004 
3005 #endif /* CONFIG_SFC_MTD */
3006 
3007 static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
3008 {
3009 	_efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
3010 }
3011 
3012 const struct efx_nic_type efx_hunt_a0_nic_type = {
3013 	.mem_map_size = efx_ef10_mem_map_size,
3014 	.probe = efx_ef10_probe,
3015 	.remove = efx_ef10_remove,
3016 	.dimension_resources = efx_ef10_dimension_resources,
3017 	.init = efx_ef10_init_nic,
3018 	.fini = efx_port_dummy_op_void,
3019 	.map_reset_reason = efx_mcdi_map_reset_reason,
3020 	.map_reset_flags = efx_ef10_map_reset_flags,
3021 	.reset = efx_mcdi_reset,
3022 	.probe_port = efx_mcdi_port_probe,
3023 	.remove_port = efx_mcdi_port_remove,
3024 	.fini_dmaq = efx_ef10_fini_dmaq,
3025 	.describe_stats = efx_ef10_describe_stats,
3026 	.update_stats = efx_ef10_update_stats,
3027 	.start_stats = efx_mcdi_mac_start_stats,
3028 	.stop_stats = efx_mcdi_mac_stop_stats,
3029 	.set_id_led = efx_mcdi_set_id_led,
3030 	.push_irq_moderation = efx_ef10_push_irq_moderation,
3031 	.reconfigure_mac = efx_ef10_mac_reconfigure,
3032 	.check_mac_fault = efx_mcdi_mac_check_fault,
3033 	.reconfigure_port = efx_mcdi_port_reconfigure,
3034 	.get_wol = efx_ef10_get_wol,
3035 	.set_wol = efx_ef10_set_wol,
3036 	.resume_wol = efx_port_dummy_op_void,
3037 	/* TODO: test_chip */
3038 	.test_nvram = efx_mcdi_nvram_test_all,
3039 	.mcdi_request = efx_ef10_mcdi_request,
3040 	.mcdi_poll_response = efx_ef10_mcdi_poll_response,
3041 	.mcdi_read_response = efx_ef10_mcdi_read_response,
3042 	.mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
3043 	.irq_enable_master = efx_port_dummy_op_void,
3044 	.irq_test_generate = efx_ef10_irq_test_generate,
3045 	.irq_disable_non_ev = efx_port_dummy_op_void,
3046 	.irq_handle_msi = efx_ef10_msi_interrupt,
3047 	.irq_handle_legacy = efx_ef10_legacy_interrupt,
3048 	.tx_probe = efx_ef10_tx_probe,
3049 	.tx_init = efx_ef10_tx_init,
3050 	.tx_remove = efx_ef10_tx_remove,
3051 	.tx_write = efx_ef10_tx_write,
3052 	.rx_push_indir_table = efx_ef10_rx_push_indir_table,
3053 	.rx_probe = efx_ef10_rx_probe,
3054 	.rx_init = efx_ef10_rx_init,
3055 	.rx_remove = efx_ef10_rx_remove,
3056 	.rx_write = efx_ef10_rx_write,
3057 	.rx_defer_refill = efx_ef10_rx_defer_refill,
3058 	.ev_probe = efx_ef10_ev_probe,
3059 	.ev_init = efx_ef10_ev_init,
3060 	.ev_fini = efx_ef10_ev_fini,
3061 	.ev_remove = efx_ef10_ev_remove,
3062 	.ev_process = efx_ef10_ev_process,
3063 	.ev_read_ack = efx_ef10_ev_read_ack,
3064 	.ev_test_generate = efx_ef10_ev_test_generate,
3065 	.filter_table_probe = efx_ef10_filter_table_probe,
3066 	.filter_table_restore = efx_ef10_filter_table_restore,
3067 	.filter_table_remove = efx_ef10_filter_table_remove,
3068 	.filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
3069 	.filter_insert = efx_ef10_filter_insert,
3070 	.filter_remove_safe = efx_ef10_filter_remove_safe,
3071 	.filter_get_safe = efx_ef10_filter_get_safe,
3072 	.filter_clear_rx = efx_ef10_filter_clear_rx,
3073 	.filter_count_rx_used = efx_ef10_filter_count_rx_used,
3074 	.filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
3075 	.filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
3076 #ifdef CONFIG_RFS_ACCEL
3077 	.filter_rfs_insert = efx_ef10_filter_rfs_insert,
3078 	.filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
3079 #endif
3080 #ifdef CONFIG_SFC_MTD
3081 	.mtd_probe = efx_ef10_mtd_probe,
3082 	.mtd_rename = efx_mcdi_mtd_rename,
3083 	.mtd_read = efx_mcdi_mtd_read,
3084 	.mtd_erase = efx_mcdi_mtd_erase,
3085 	.mtd_write = efx_mcdi_mtd_write,
3086 	.mtd_sync = efx_mcdi_mtd_sync,
3087 #endif
3088 	.ptp_write_host_time = efx_ef10_ptp_write_host_time,
3089 
3090 	.revision = EFX_REV_HUNT_A0,
3091 	.max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
3092 	.rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
3093 	.rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
3094 	.can_rx_scatter = true,
3095 	.always_rx_scatter = true,
3096 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
3097 	.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
3098 	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3099 			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
3100 	.mcdi_max_ver = 2,
3101 	.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
3102 };
3103