1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <asm/param.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/etherdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <linux/slab.h>
20 #include <linux/stddef.h>
21 #include <linux/string.h>
22 #include <linux/version.h>
23 #include <linux/workqueue.h>
24 #include <linux/bitops.h>
25 #include <linux/bug.h>
26 #include "qed.h"
27 #include <linux/qed/qed_chain.h>
28 #include "qed_cxt.h"
29 #include "qed_dev_api.h"
30 #include <linux/qed/qed_eth_if.h>
31 #include "qed_hsi.h"
32 #include "qed_hw.h"
33 #include "qed_int.h"
34 #include "qed_l2.h"
35 #include "qed_mcp.h"
36 #include "qed_reg_addr.h"
37 #include "qed_sp.h"
38 #include "qed_sriov.h"
39 
40 
41 #define QED_MAX_SGES_NUM 16
42 #define CRC32_POLY 0x1edc6f41
43 
44 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
45 			   struct qed_sp_vport_start_params *p_params)
46 {
47 	struct vport_start_ramrod_data *p_ramrod = NULL;
48 	struct qed_spq_entry *p_ent =  NULL;
49 	struct qed_sp_init_data init_data;
50 	u8 abs_vport_id = 0;
51 	int rc = -EINVAL;
52 	u16 rx_mode = 0;
53 
54 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
55 	if (rc != 0)
56 		return rc;
57 
58 	memset(&init_data, 0, sizeof(init_data));
59 	init_data.cid = qed_spq_get_cid(p_hwfn);
60 	init_data.opaque_fid = p_params->opaque_fid;
61 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
62 
63 	rc = qed_sp_init_request(p_hwfn, &p_ent,
64 				 ETH_RAMROD_VPORT_START,
65 				 PROTOCOLID_ETH, &init_data);
66 	if (rc)
67 		return rc;
68 
69 	p_ramrod		= &p_ent->ramrod.vport_start;
70 	p_ramrod->vport_id	= abs_vport_id;
71 
72 	p_ramrod->mtu			= cpu_to_le16(p_params->mtu);
73 	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
74 	p_ramrod->drop_ttl0_en		= p_params->drop_ttl0;
75 	p_ramrod->untagged		= p_params->only_untagged;
76 
77 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
78 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
79 
80 	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
81 
82 	/* TPA related fields */
83 	memset(&p_ramrod->tpa_param, 0,
84 	       sizeof(struct eth_vport_tpa_param));
85 
86 	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
87 
88 	switch (p_params->tpa_mode) {
89 	case QED_TPA_MODE_GRO:
90 		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
91 		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
92 		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
93 		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
94 		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
95 		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
96 		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
97 		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
98 		break;
99 	default:
100 		break;
101 	}
102 
103 	p_ramrod->tx_switching_en = p_params->tx_switching;
104 
105 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
106 	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
107 						  p_params->concrete_fid);
108 
109 	return qed_spq_post(p_hwfn, p_ent, NULL);
110 }
111 
112 int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
113 		       struct qed_sp_vport_start_params *p_params)
114 {
115 	if (IS_VF(p_hwfn->cdev)) {
116 		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
117 					     p_params->mtu,
118 					     p_params->remove_inner_vlan,
119 					     p_params->tpa_mode,
120 					     p_params->max_buffers_per_cqe,
121 					     p_params->only_untagged);
122 	}
123 
124 	return qed_sp_eth_vport_start(p_hwfn, p_params);
125 }
126 
127 static int
128 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
129 			struct vport_update_ramrod_data *p_ramrod,
130 			struct qed_rss_params *p_params)
131 {
132 	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
133 	u16 abs_l2_queue = 0, capabilities = 0;
134 	int rc = 0, i;
135 
136 	if (!p_params) {
137 		p_ramrod->common.update_rss_flg = 0;
138 		return rc;
139 	}
140 
141 	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
142 		     ETH_RSS_IND_TABLE_ENTRIES_NUM);
143 
144 	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
145 	if (rc)
146 		return rc;
147 
148 	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
149 	rss->update_rss_capabilities = p_params->update_rss_capabilities;
150 	rss->update_rss_ind_table = p_params->update_rss_ind_table;
151 	rss->update_rss_key = p_params->update_rss_key;
152 
153 	rss->rss_mode = p_params->rss_enable ?
154 			ETH_VPORT_RSS_MODE_REGULAR :
155 			ETH_VPORT_RSS_MODE_DISABLED;
156 
157 	SET_FIELD(capabilities,
158 		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
159 		  !!(p_params->rss_caps & QED_RSS_IPV4));
160 	SET_FIELD(capabilities,
161 		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
162 		  !!(p_params->rss_caps & QED_RSS_IPV6));
163 	SET_FIELD(capabilities,
164 		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
165 		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
166 	SET_FIELD(capabilities,
167 		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
168 		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
169 	SET_FIELD(capabilities,
170 		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
171 		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
172 	SET_FIELD(capabilities,
173 		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
174 		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
175 	rss->tbl_size = p_params->rss_table_size_log;
176 
177 	rss->capabilities = cpu_to_le16(capabilities);
178 
179 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
180 		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
181 		   p_ramrod->common.update_rss_flg,
182 		   rss->rss_mode, rss->update_rss_capabilities,
183 		   capabilities, rss->update_rss_ind_table,
184 		   rss->update_rss_key);
185 
186 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
187 		rc = qed_fw_l2_queue(p_hwfn,
188 				     (u8)p_params->rss_ind_table[i],
189 				     &abs_l2_queue);
190 		if (rc)
191 			return rc;
192 
193 		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
194 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
195 			   i, rss->indirection_table[i]);
196 	}
197 
198 	for (i = 0; i < 10; i++)
199 		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
200 
201 	return rc;
202 }
203 
204 static void
205 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
206 			  struct vport_update_ramrod_data *p_ramrod,
207 			  struct qed_filter_accept_flags accept_flags)
208 {
209 	p_ramrod->common.update_rx_mode_flg =
210 		accept_flags.update_rx_mode_config;
211 
212 	p_ramrod->common.update_tx_mode_flg =
213 		accept_flags.update_tx_mode_config;
214 
215 	/* Set Rx mode accept flags */
216 	if (p_ramrod->common.update_rx_mode_flg) {
217 		u8 accept_filter = accept_flags.rx_accept_filter;
218 		u16 state = 0;
219 
220 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
221 			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
222 			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
223 
224 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
225 			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
226 
227 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
228 			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
229 			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
230 
231 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
232 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
233 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
234 
235 		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
236 			  !!(accept_filter & QED_ACCEPT_BCAST));
237 
238 		p_ramrod->rx_mode.state = cpu_to_le16(state);
239 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
240 			   "p_ramrod->rx_mode.state = 0x%x\n", state);
241 	}
242 
243 	/* Set Tx mode accept flags */
244 	if (p_ramrod->common.update_tx_mode_flg) {
245 		u8 accept_filter = accept_flags.tx_accept_filter;
246 		u16 state = 0;
247 
248 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
249 			  !!(accept_filter & QED_ACCEPT_NONE));
250 
251 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
252 			  !!(accept_filter & QED_ACCEPT_NONE));
253 
254 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
255 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
256 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
257 
258 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
259 			  !!(accept_filter & QED_ACCEPT_BCAST));
260 
261 		p_ramrod->tx_mode.state = cpu_to_le16(state);
262 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
263 			   "p_ramrod->tx_mode.state = 0x%x\n", state);
264 	}
265 }
266 
267 static void
268 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
269 			    struct vport_update_ramrod_data *p_ramrod,
270 			    struct qed_sge_tpa_params *p_params)
271 {
272 	struct eth_vport_tpa_param *p_tpa;
273 
274 	if (!p_params) {
275 		p_ramrod->common.update_tpa_param_flg = 0;
276 		p_ramrod->common.update_tpa_en_flg = 0;
277 		p_ramrod->common.update_tpa_param_flg = 0;
278 		return;
279 	}
280 
281 	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
282 	p_tpa = &p_ramrod->tpa_param;
283 	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
284 	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
285 	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
286 	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
287 
288 	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
289 	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
290 	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
291 	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
292 	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
293 	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
294 	p_tpa->tpa_max_size = p_params->tpa_max_size;
295 	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
296 	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
297 }
298 
299 static void
300 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
301 			struct vport_update_ramrod_data *p_ramrod,
302 			struct qed_sp_vport_update_params *p_params)
303 {
304 	int i;
305 
306 	memset(&p_ramrod->approx_mcast.bins, 0,
307 	       sizeof(p_ramrod->approx_mcast.bins));
308 
309 	if (p_params->update_approx_mcast_flg) {
310 		p_ramrod->common.update_approx_mcast_flg = 1;
311 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
312 			u32 *p_bins = (u32 *)p_params->bins;
313 			__le32 val = cpu_to_le32(p_bins[i]);
314 
315 			p_ramrod->approx_mcast.bins[i] = val;
316 		}
317 	}
318 }
319 
320 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
321 			struct qed_sp_vport_update_params *p_params,
322 			enum spq_mode comp_mode,
323 			struct qed_spq_comp_cb *p_comp_data)
324 {
325 	struct qed_rss_params *p_rss_params = p_params->rss_params;
326 	struct vport_update_ramrod_data_cmn *p_cmn;
327 	struct qed_sp_init_data init_data;
328 	struct vport_update_ramrod_data *p_ramrod = NULL;
329 	struct qed_spq_entry *p_ent = NULL;
330 	u8 abs_vport_id = 0, val;
331 	int rc = -EINVAL;
332 
333 	if (IS_VF(p_hwfn->cdev)) {
334 		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
335 		return rc;
336 	}
337 
338 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
339 	if (rc != 0)
340 		return rc;
341 
342 	memset(&init_data, 0, sizeof(init_data));
343 	init_data.cid = qed_spq_get_cid(p_hwfn);
344 	init_data.opaque_fid = p_params->opaque_fid;
345 	init_data.comp_mode = comp_mode;
346 	init_data.p_comp_data = p_comp_data;
347 
348 	rc = qed_sp_init_request(p_hwfn, &p_ent,
349 				 ETH_RAMROD_VPORT_UPDATE,
350 				 PROTOCOLID_ETH, &init_data);
351 	if (rc)
352 		return rc;
353 
354 	/* Copy input params to ramrod according to FW struct */
355 	p_ramrod = &p_ent->ramrod.vport_update;
356 	p_cmn = &p_ramrod->common;
357 
358 	p_cmn->vport_id = abs_vport_id;
359 	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
360 	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
361 	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
362 	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
363 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
364 	p_cmn->update_accept_any_vlan_flg =
365 			p_params->update_accept_any_vlan_flg;
366 
367 	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
368 	val = p_params->update_inner_vlan_removal_flg;
369 	p_cmn->update_inner_vlan_removal_en_flg = val;
370 
371 	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
372 	val = p_params->update_default_vlan_enable_flg;
373 	p_cmn->update_default_vlan_en_flg = val;
374 
375 	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
376 	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
377 
378 	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
379 
380 	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
381 	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
382 
383 	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
384 	val = p_params->update_anti_spoofing_en_flg;
385 	p_ramrod->common.update_anti_spoofing_en_flg = val;
386 
387 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
388 	if (rc) {
389 		/* Return spq entry which is taken in qed_sp_init_request()*/
390 		qed_spq_return_entry(p_hwfn, p_ent);
391 		return rc;
392 	}
393 
394 	/* Update mcast bins for VFs, PF doesn't use this functionality */
395 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
396 
397 	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
398 	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
399 	return qed_spq_post(p_hwfn, p_ent, NULL);
400 }
401 
402 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
403 {
404 	struct vport_stop_ramrod_data *p_ramrod;
405 	struct qed_sp_init_data init_data;
406 	struct qed_spq_entry *p_ent;
407 	u8 abs_vport_id = 0;
408 	int rc;
409 
410 	if (IS_VF(p_hwfn->cdev))
411 		return qed_vf_pf_vport_stop(p_hwfn);
412 
413 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
414 	if (rc != 0)
415 		return rc;
416 
417 	memset(&init_data, 0, sizeof(init_data));
418 	init_data.cid = qed_spq_get_cid(p_hwfn);
419 	init_data.opaque_fid = opaque_fid;
420 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
421 
422 	rc = qed_sp_init_request(p_hwfn, &p_ent,
423 				 ETH_RAMROD_VPORT_STOP,
424 				 PROTOCOLID_ETH, &init_data);
425 	if (rc)
426 		return rc;
427 
428 	p_ramrod = &p_ent->ramrod.vport_stop;
429 	p_ramrod->vport_id = abs_vport_id;
430 
431 	return qed_spq_post(p_hwfn, p_ent, NULL);
432 }
433 
434 static int
435 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
436 		       struct qed_filter_accept_flags *p_accept_flags)
437 {
438 	struct qed_sp_vport_update_params s_params;
439 
440 	memset(&s_params, 0, sizeof(s_params));
441 	memcpy(&s_params.accept_flags, p_accept_flags,
442 	       sizeof(struct qed_filter_accept_flags));
443 
444 	return qed_vf_pf_vport_update(p_hwfn, &s_params);
445 }
446 
447 static int qed_filter_accept_cmd(struct qed_dev *cdev,
448 				 u8 vport,
449 				 struct qed_filter_accept_flags accept_flags,
450 				 u8 update_accept_any_vlan,
451 				 u8 accept_any_vlan,
452 				 enum spq_mode comp_mode,
453 				 struct qed_spq_comp_cb *p_comp_data)
454 {
455 	struct qed_sp_vport_update_params vport_update_params;
456 	int i, rc;
457 
458 	/* Prepare and send the vport rx_mode change */
459 	memset(&vport_update_params, 0, sizeof(vport_update_params));
460 	vport_update_params.vport_id = vport;
461 	vport_update_params.accept_flags = accept_flags;
462 	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
463 	vport_update_params.accept_any_vlan = accept_any_vlan;
464 
465 	for_each_hwfn(cdev, i) {
466 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
467 
468 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
469 
470 		if (IS_VF(cdev)) {
471 			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
472 			if (rc)
473 				return rc;
474 			continue;
475 		}
476 
477 		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
478 					 comp_mode, p_comp_data);
479 		if (rc != 0) {
480 			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
481 			return rc;
482 		}
483 
484 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
485 			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
486 			   accept_flags.rx_accept_filter,
487 			   accept_flags.tx_accept_filter);
488 		if (update_accept_any_vlan)
489 			DP_VERBOSE(p_hwfn, QED_MSG_SP,
490 				   "accept_any_vlan=%d configured\n",
491 				   accept_any_vlan);
492 	}
493 
494 	return 0;
495 }
496 
497 static int qed_sp_release_queue_cid(
498 	struct qed_hwfn *p_hwfn,
499 	struct qed_hw_cid_data *p_cid_data)
500 {
501 	if (!p_cid_data->b_cid_allocated)
502 		return 0;
503 
504 	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
505 
506 	p_cid_data->b_cid_allocated = false;
507 
508 	return 0;
509 }
510 
511 int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
512 				u16 opaque_fid,
513 				u32 cid,
514 				struct qed_queue_start_common_params *params,
515 				u8 stats_id,
516 				u16 bd_max_bytes,
517 				dma_addr_t bd_chain_phys_addr,
518 				dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
519 {
520 	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
521 	struct qed_spq_entry *p_ent = NULL;
522 	struct qed_sp_init_data init_data;
523 	struct qed_hw_cid_data *p_rx_cid;
524 	u16 abs_rx_q_id = 0;
525 	u8 abs_vport_id = 0;
526 	int rc = -EINVAL;
527 
528 	/* Store information for the stop */
529 	p_rx_cid		= &p_hwfn->p_rx_cids[params->queue_id];
530 	p_rx_cid->cid		= cid;
531 	p_rx_cid->opaque_fid	= opaque_fid;
532 	p_rx_cid->vport_id	= params->vport_id;
533 
534 	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
535 	if (rc != 0)
536 		return rc;
537 
538 	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
539 	if (rc != 0)
540 		return rc;
541 
542 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
543 		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
544 		   opaque_fid, cid, params->queue_id, params->vport_id,
545 		   params->sb);
546 
547 	/* Get SPQ entry */
548 	memset(&init_data, 0, sizeof(init_data));
549 	init_data.cid = cid;
550 	init_data.opaque_fid = opaque_fid;
551 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
552 
553 	rc = qed_sp_init_request(p_hwfn, &p_ent,
554 				 ETH_RAMROD_RX_QUEUE_START,
555 				 PROTOCOLID_ETH, &init_data);
556 	if (rc)
557 		return rc;
558 
559 	p_ramrod = &p_ent->ramrod.rx_queue_start;
560 
561 	p_ramrod->sb_id			= cpu_to_le16(params->sb);
562 	p_ramrod->sb_index		= params->sb_idx;
563 	p_ramrod->vport_id		= abs_vport_id;
564 	p_ramrod->stats_counter_id	= stats_id;
565 	p_ramrod->rx_queue_id		= cpu_to_le16(abs_rx_q_id);
566 	p_ramrod->complete_cqe_flg	= 0;
567 	p_ramrod->complete_event_flg	= 1;
568 
569 	p_ramrod->bd_max_bytes	= cpu_to_le16(bd_max_bytes);
570 	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
571 
572 	p_ramrod->num_of_pbl_pages	= cpu_to_le16(cqe_pbl_size);
573 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
574 
575 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
576 
577 	return rc;
578 }
579 
580 static int
581 qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
582 			  u16 opaque_fid,
583 			  struct qed_queue_start_common_params *params,
584 			  u16 bd_max_bytes,
585 			  dma_addr_t bd_chain_phys_addr,
586 			  dma_addr_t cqe_pbl_addr,
587 			  u16 cqe_pbl_size, void __iomem **pp_prod)
588 {
589 	struct qed_hw_cid_data *p_rx_cid;
590 	u64 init_prod_val = 0;
591 	u16 abs_l2_queue = 0;
592 	u8 abs_stats_id = 0;
593 	int rc;
594 
595 	if (IS_VF(p_hwfn->cdev)) {
596 		return qed_vf_pf_rxq_start(p_hwfn,
597 					   params->queue_id,
598 					   params->sb,
599 					   params->sb_idx,
600 					   bd_max_bytes,
601 					   bd_chain_phys_addr,
602 					   cqe_pbl_addr, cqe_pbl_size, pp_prod);
603 	}
604 
605 	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
606 	if (rc != 0)
607 		return rc;
608 
609 	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
610 	if (rc != 0)
611 		return rc;
612 
613 	*pp_prod = (u8 __iomem *)p_hwfn->regview +
614 				 GTT_BAR0_MAP_REG_MSDM_RAM +
615 				 MSTORM_PRODS_OFFSET(abs_l2_queue);
616 
617 	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
618 	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
619 			  (u32 *)(&init_prod_val));
620 
621 	/* Allocate a CID for the queue */
622 	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
623 	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
624 				 &p_rx_cid->cid);
625 	if (rc) {
626 		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
627 		return rc;
628 	}
629 	p_rx_cid->b_cid_allocated = true;
630 
631 	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
632 					 opaque_fid,
633 					 p_rx_cid->cid,
634 					 params,
635 					 abs_stats_id,
636 					 bd_max_bytes,
637 					 bd_chain_phys_addr,
638 					 cqe_pbl_addr,
639 					 cqe_pbl_size);
640 
641 	if (rc != 0)
642 		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
643 
644 	return rc;
645 }
646 
647 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
648 				u16 rx_queue_id,
649 				u8 num_rxqs,
650 				u8 complete_cqe_flg,
651 				u8 complete_event_flg,
652 				enum spq_mode comp_mode,
653 				struct qed_spq_comp_cb *p_comp_data)
654 {
655 	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
656 	struct qed_spq_entry *p_ent = NULL;
657 	struct qed_sp_init_data init_data;
658 	struct qed_hw_cid_data *p_rx_cid;
659 	u16 qid, abs_rx_q_id = 0;
660 	int rc = -EINVAL;
661 	u8 i;
662 
663 	memset(&init_data, 0, sizeof(init_data));
664 	init_data.comp_mode = comp_mode;
665 	init_data.p_comp_data = p_comp_data;
666 
667 	for (i = 0; i < num_rxqs; i++) {
668 		qid = rx_queue_id + i;
669 		p_rx_cid = &p_hwfn->p_rx_cids[qid];
670 
671 		/* Get SPQ entry */
672 		init_data.cid = p_rx_cid->cid;
673 		init_data.opaque_fid = p_rx_cid->opaque_fid;
674 
675 		rc = qed_sp_init_request(p_hwfn, &p_ent,
676 					 ETH_RAMROD_RX_QUEUE_UPDATE,
677 					 PROTOCOLID_ETH, &init_data);
678 		if (rc)
679 			return rc;
680 
681 		p_ramrod = &p_ent->ramrod.rx_queue_update;
682 
683 		qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
684 		qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
685 		p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
686 		p_ramrod->complete_cqe_flg = complete_cqe_flg;
687 		p_ramrod->complete_event_flg = complete_event_flg;
688 
689 		rc = qed_spq_post(p_hwfn, p_ent, NULL);
690 		if (rc)
691 			return rc;
692 	}
693 
694 	return rc;
695 }
696 
697 int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
698 			     u16 rx_queue_id,
699 			     bool eq_completion_only, bool cqe_completion)
700 {
701 	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
702 	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
703 	struct qed_spq_entry *p_ent = NULL;
704 	struct qed_sp_init_data init_data;
705 	u16 abs_rx_q_id = 0;
706 	int rc = -EINVAL;
707 
708 	if (IS_VF(p_hwfn->cdev))
709 		return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
710 
711 	/* Get SPQ entry */
712 	memset(&init_data, 0, sizeof(init_data));
713 	init_data.cid = p_rx_cid->cid;
714 	init_data.opaque_fid = p_rx_cid->opaque_fid;
715 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
716 
717 	rc = qed_sp_init_request(p_hwfn, &p_ent,
718 				 ETH_RAMROD_RX_QUEUE_STOP,
719 				 PROTOCOLID_ETH, &init_data);
720 	if (rc)
721 		return rc;
722 
723 	p_ramrod = &p_ent->ramrod.rx_queue_stop;
724 
725 	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
726 	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
727 	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
728 
729 	/* Cleaning the queue requires the completion to arrive there.
730 	 * In addition, VFs require the answer to come as eqe to PF.
731 	 */
732 	p_ramrod->complete_cqe_flg =
733 		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
734 		 !eq_completion_only) || cqe_completion;
735 	p_ramrod->complete_event_flg =
736 		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
737 		eq_completion_only;
738 
739 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
740 	if (rc)
741 		return rc;
742 
743 	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
744 }
745 
746 int qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
747 				u16  opaque_fid,
748 				u32  cid,
749 				struct qed_queue_start_common_params *p_params,
750 				u8  stats_id,
751 				dma_addr_t pbl_addr,
752 				u16 pbl_size,
753 				union qed_qm_pq_params *p_pq_params)
754 {
755 	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
756 	struct qed_spq_entry *p_ent = NULL;
757 	struct qed_sp_init_data init_data;
758 	struct qed_hw_cid_data *p_tx_cid;
759 	u8 abs_vport_id;
760 	int rc = -EINVAL;
761 	u16 pq_id;
762 
763 	/* Store information for the stop */
764 	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
765 	p_tx_cid->cid		= cid;
766 	p_tx_cid->opaque_fid	= opaque_fid;
767 
768 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
769 	if (rc)
770 		return rc;
771 
772 	/* Get SPQ entry */
773 	memset(&init_data, 0, sizeof(init_data));
774 	init_data.cid = cid;
775 	init_data.opaque_fid = opaque_fid;
776 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
777 
778 	rc = qed_sp_init_request(p_hwfn, &p_ent,
779 				 ETH_RAMROD_TX_QUEUE_START,
780 				 PROTOCOLID_ETH, &init_data);
781 	if (rc)
782 		return rc;
783 
784 	p_ramrod		= &p_ent->ramrod.tx_queue_start;
785 	p_ramrod->vport_id	= abs_vport_id;
786 
787 	p_ramrod->sb_id			= cpu_to_le16(p_params->sb);
788 	p_ramrod->sb_index		= p_params->sb_idx;
789 	p_ramrod->stats_counter_id	= stats_id;
790 
791 	p_ramrod->pbl_size		= cpu_to_le16(pbl_size);
792 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
793 
794 	pq_id			= qed_get_qm_pq(p_hwfn,
795 						PROTOCOLID_ETH,
796 						p_pq_params);
797 	p_ramrod->qm_pq_id	= cpu_to_le16(pq_id);
798 
799 	return qed_spq_post(p_hwfn, p_ent, NULL);
800 }
801 
802 static int
803 qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
804 			  u16 opaque_fid,
805 			  struct qed_queue_start_common_params *p_params,
806 			  dma_addr_t pbl_addr,
807 			  u16 pbl_size, void __iomem **pp_doorbell)
808 {
809 	struct qed_hw_cid_data *p_tx_cid;
810 	union qed_qm_pq_params pq_params;
811 	u8 abs_stats_id = 0;
812 	int rc;
813 
814 	if (IS_VF(p_hwfn->cdev)) {
815 		return qed_vf_pf_txq_start(p_hwfn,
816 					   p_params->queue_id,
817 					   p_params->sb,
818 					   p_params->sb_idx,
819 					   pbl_addr, pbl_size, pp_doorbell);
820 	}
821 
822 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
823 	if (rc)
824 		return rc;
825 
826 	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
827 	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
828 	memset(&pq_params, 0, sizeof(pq_params));
829 
830 	/* Allocate a CID for the queue */
831 	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
832 				 &p_tx_cid->cid);
833 	if (rc) {
834 		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
835 		return rc;
836 	}
837 	p_tx_cid->b_cid_allocated = true;
838 
839 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
840 		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
841 		   opaque_fid, p_tx_cid->cid,
842 		   p_params->queue_id, p_params->vport_id, p_params->sb);
843 
844 	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
845 					 opaque_fid,
846 					 p_tx_cid->cid,
847 					 p_params,
848 					 abs_stats_id,
849 					 pbl_addr,
850 					 pbl_size,
851 					 &pq_params);
852 
853 	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
854 				     qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
855 
856 	if (rc)
857 		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
858 
859 	return rc;
860 }
861 
862 int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
863 {
864 	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
865 	struct qed_spq_entry *p_ent = NULL;
866 	struct qed_sp_init_data init_data;
867 	int rc = -EINVAL;
868 
869 	if (IS_VF(p_hwfn->cdev))
870 		return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
871 
872 	/* Get SPQ entry */
873 	memset(&init_data, 0, sizeof(init_data));
874 	init_data.cid = p_tx_cid->cid;
875 	init_data.opaque_fid = p_tx_cid->opaque_fid;
876 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
877 
878 	rc = qed_sp_init_request(p_hwfn, &p_ent,
879 				 ETH_RAMROD_TX_QUEUE_STOP,
880 				 PROTOCOLID_ETH, &init_data);
881 	if (rc)
882 		return rc;
883 
884 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
885 	if (rc)
886 		return rc;
887 
888 	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
889 }
890 
891 static enum eth_filter_action
892 qed_filter_action(enum qed_filter_opcode opcode)
893 {
894 	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
895 
896 	switch (opcode) {
897 	case QED_FILTER_ADD:
898 		action = ETH_FILTER_ACTION_ADD;
899 		break;
900 	case QED_FILTER_REMOVE:
901 		action = ETH_FILTER_ACTION_REMOVE;
902 		break;
903 	case QED_FILTER_FLUSH:
904 		action = ETH_FILTER_ACTION_REMOVE_ALL;
905 		break;
906 	default:
907 		action = MAX_ETH_FILTER_ACTION;
908 	}
909 
910 	return action;
911 }
912 
913 static void qed_set_fw_mac_addr(__le16 *fw_msb,
914 				__le16 *fw_mid,
915 				__le16 *fw_lsb,
916 				u8 *mac)
917 {
918 	((u8 *)fw_msb)[0] = mac[1];
919 	((u8 *)fw_msb)[1] = mac[0];
920 	((u8 *)fw_mid)[0] = mac[3];
921 	((u8 *)fw_mid)[1] = mac[2];
922 	((u8 *)fw_lsb)[0] = mac[5];
923 	((u8 *)fw_lsb)[1] = mac[4];
924 }
925 
926 static int
927 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
928 			u16 opaque_fid,
929 			struct qed_filter_ucast *p_filter_cmd,
930 			struct vport_filter_update_ramrod_data **pp_ramrod,
931 			struct qed_spq_entry **pp_ent,
932 			enum spq_mode comp_mode,
933 			struct qed_spq_comp_cb *p_comp_data)
934 {
935 	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
936 	struct vport_filter_update_ramrod_data *p_ramrod;
937 	struct eth_filter_cmd *p_first_filter;
938 	struct eth_filter_cmd *p_second_filter;
939 	struct qed_sp_init_data init_data;
940 	enum eth_filter_action action;
941 	int rc;
942 
943 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
944 			  &vport_to_remove_from);
945 	if (rc)
946 		return rc;
947 
948 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
949 			  &vport_to_add_to);
950 	if (rc)
951 		return rc;
952 
953 	/* Get SPQ entry */
954 	memset(&init_data, 0, sizeof(init_data));
955 	init_data.cid = qed_spq_get_cid(p_hwfn);
956 	init_data.opaque_fid = opaque_fid;
957 	init_data.comp_mode = comp_mode;
958 	init_data.p_comp_data = p_comp_data;
959 
960 	rc = qed_sp_init_request(p_hwfn, pp_ent,
961 				 ETH_RAMROD_FILTERS_UPDATE,
962 				 PROTOCOLID_ETH, &init_data);
963 	if (rc)
964 		return rc;
965 
966 	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
967 	p_ramrod = *pp_ramrod;
968 	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
969 	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
970 
971 	switch (p_filter_cmd->opcode) {
972 	case QED_FILTER_REPLACE:
973 	case QED_FILTER_MOVE:
974 		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
975 	default:
976 		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
977 	}
978 
979 	p_first_filter	= &p_ramrod->filter_cmds[0];
980 	p_second_filter = &p_ramrod->filter_cmds[1];
981 
982 	switch (p_filter_cmd->type) {
983 	case QED_FILTER_MAC:
984 		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
985 	case QED_FILTER_VLAN:
986 		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
987 	case QED_FILTER_MAC_VLAN:
988 		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
989 	case QED_FILTER_INNER_MAC:
990 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
991 	case QED_FILTER_INNER_VLAN:
992 		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
993 	case QED_FILTER_INNER_PAIR:
994 		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
995 	case QED_FILTER_INNER_MAC_VNI_PAIR:
996 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
997 		break;
998 	case QED_FILTER_MAC_VNI_PAIR:
999 		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
1000 	case QED_FILTER_VNI:
1001 		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
1002 	}
1003 
1004 	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1005 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1006 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1007 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1008 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1009 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
1010 		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
1011 				    &p_first_filter->mac_mid,
1012 				    &p_first_filter->mac_lsb,
1013 				    (u8 *)p_filter_cmd->mac);
1014 	}
1015 
1016 	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1017 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1018 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1019 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1020 		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
1021 
1022 	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1023 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1024 	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1025 		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
1026 
1027 	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
1028 		p_second_filter->type		= p_first_filter->type;
1029 		p_second_filter->mac_msb	= p_first_filter->mac_msb;
1030 		p_second_filter->mac_mid	= p_first_filter->mac_mid;
1031 		p_second_filter->mac_lsb	= p_first_filter->mac_lsb;
1032 		p_second_filter->vlan_id	= p_first_filter->vlan_id;
1033 		p_second_filter->vni		= p_first_filter->vni;
1034 
1035 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1036 
1037 		p_first_filter->vport_id = vport_to_remove_from;
1038 
1039 		p_second_filter->action		= ETH_FILTER_ACTION_ADD;
1040 		p_second_filter->vport_id	= vport_to_add_to;
1041 	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
1042 		p_first_filter->vport_id = vport_to_add_to;
1043 		memcpy(p_second_filter, p_first_filter,
1044 		       sizeof(*p_second_filter));
1045 		p_first_filter->action	= ETH_FILTER_ACTION_REMOVE_ALL;
1046 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1047 	} else {
1048 		action = qed_filter_action(p_filter_cmd->opcode);
1049 
1050 		if (action == MAX_ETH_FILTER_ACTION) {
1051 			DP_NOTICE(p_hwfn,
1052 				  "%d is not supported yet\n",
1053 				  p_filter_cmd->opcode);
1054 			return -EINVAL;
1055 		}
1056 
1057 		p_first_filter->action = action;
1058 		p_first_filter->vport_id = (p_filter_cmd->opcode ==
1059 					    QED_FILTER_REMOVE) ?
1060 					   vport_to_remove_from :
1061 					   vport_to_add_to;
1062 	}
1063 
1064 	return 0;
1065 }
1066 
1067 int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
1068 			    u16 opaque_fid,
1069 			    struct qed_filter_ucast *p_filter_cmd,
1070 			    enum spq_mode comp_mode,
1071 			    struct qed_spq_comp_cb *p_comp_data)
1072 {
1073 	struct vport_filter_update_ramrod_data	*p_ramrod	= NULL;
1074 	struct qed_spq_entry			*p_ent		= NULL;
1075 	struct eth_filter_cmd_header		*p_header;
1076 	int					rc;
1077 
1078 	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1079 				     &p_ramrod, &p_ent,
1080 				     comp_mode, p_comp_data);
1081 	if (rc != 0) {
1082 		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1083 		return rc;
1084 	}
1085 	p_header = &p_ramrod->filter_cmd_hdr;
1086 	p_header->assert_on_error = p_filter_cmd->assert_on_error;
1087 
1088 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1089 	if (rc != 0) {
1090 		DP_ERR(p_hwfn,
1091 		       "Unicast filter ADD command failed %d\n",
1092 		       rc);
1093 		return rc;
1094 	}
1095 
1096 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1097 		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1098 		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1099 		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1100 		   "REMOVE" :
1101 		   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1102 		    "MOVE" : "REPLACE")),
1103 		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1104 		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1105 		    "VLAN" : "MAC & VLAN"),
1106 		   p_ramrod->filter_cmd_hdr.cmd_cnt,
1107 		   p_filter_cmd->is_rx_filter,
1108 		   p_filter_cmd->is_tx_filter);
1109 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1110 		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1111 		   p_filter_cmd->vport_to_add_to,
1112 		   p_filter_cmd->vport_to_remove_from,
1113 		   p_filter_cmd->mac[0],
1114 		   p_filter_cmd->mac[1],
1115 		   p_filter_cmd->mac[2],
1116 		   p_filter_cmd->mac[3],
1117 		   p_filter_cmd->mac[4],
1118 		   p_filter_cmd->mac[5],
1119 		   p_filter_cmd->vlan);
1120 
1121 	return 0;
1122 }
1123 
1124 /*******************************************************************************
1125  * Description:
1126  *         Calculates crc 32 on a buffer
1127  *         Note: crc32_length MUST be aligned to 8
1128  * Return:
1129  ******************************************************************************/
1130 static u32 qed_calc_crc32c(u8 *crc32_packet,
1131 			   u32 crc32_length,
1132 			   u32 crc32_seed,
1133 			   u8 complement)
1134 {
1135 	u32 byte = 0;
1136 	u32 bit = 0;
1137 	u8 msb = 0;
1138 	u8 current_byte = 0;
1139 	u32 crc32_result = crc32_seed;
1140 
1141 	if ((!crc32_packet) ||
1142 	    (crc32_length == 0) ||
1143 	    ((crc32_length % 8) != 0))
1144 		return crc32_result;
1145 	for (byte = 0; byte < crc32_length; byte++) {
1146 		current_byte = crc32_packet[byte];
1147 		for (bit = 0; bit < 8; bit++) {
1148 			msb = (u8)(crc32_result >> 31);
1149 			crc32_result = crc32_result << 1;
1150 			if (msb != (0x1 & (current_byte >> bit))) {
1151 				crc32_result = crc32_result ^ CRC32_POLY;
1152 				crc32_result |= 1; /*crc32_result[0] = 1;*/
1153 			}
1154 		}
1155 	}
1156 	return crc32_result;
1157 }
1158 
1159 static inline u32 qed_crc32c_le(u32 seed,
1160 				u8 *mac,
1161 				u32 len)
1162 {
1163 	u32 packet_buf[2] = { 0 };
1164 
1165 	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1166 	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1167 }
1168 
1169 u8 qed_mcast_bin_from_mac(u8 *mac)
1170 {
1171 	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1172 				mac, ETH_ALEN);
1173 
1174 	return crc & 0xff;
1175 }
1176 
1177 static int
1178 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1179 			u16 opaque_fid,
1180 			struct qed_filter_mcast *p_filter_cmd,
1181 			enum spq_mode comp_mode,
1182 			struct qed_spq_comp_cb *p_comp_data)
1183 {
1184 	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1185 	struct vport_update_ramrod_data *p_ramrod = NULL;
1186 	struct qed_spq_entry *p_ent = NULL;
1187 	struct qed_sp_init_data init_data;
1188 	u8 abs_vport_id = 0;
1189 	int rc, i;
1190 
1191 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1192 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1193 				  &abs_vport_id);
1194 		if (rc)
1195 			return rc;
1196 	} else {
1197 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1198 				  &abs_vport_id);
1199 		if (rc)
1200 			return rc;
1201 	}
1202 
1203 	/* Get SPQ entry */
1204 	memset(&init_data, 0, sizeof(init_data));
1205 	init_data.cid = qed_spq_get_cid(p_hwfn);
1206 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1207 	init_data.comp_mode = comp_mode;
1208 	init_data.p_comp_data = p_comp_data;
1209 
1210 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1211 				 ETH_RAMROD_VPORT_UPDATE,
1212 				 PROTOCOLID_ETH, &init_data);
1213 	if (rc) {
1214 		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1215 		return rc;
1216 	}
1217 
1218 	p_ramrod = &p_ent->ramrod.vport_update;
1219 	p_ramrod->common.update_approx_mcast_flg = 1;
1220 
1221 	/* explicitly clear out the entire vector */
1222 	memset(&p_ramrod->approx_mcast.bins, 0,
1223 	       sizeof(p_ramrod->approx_mcast.bins));
1224 	memset(bins, 0, sizeof(unsigned long) *
1225 	       ETH_MULTICAST_MAC_BINS_IN_REGS);
1226 	/* filter ADD op is explicit set op and it removes
1227 	 *  any existing filters for the vport
1228 	 */
1229 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1230 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1231 			u32 bit;
1232 
1233 			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1234 			__set_bit(bit, bins);
1235 		}
1236 
1237 		/* Convert to correct endianity */
1238 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1239 			u32 *p_bins = (u32 *)bins;
1240 			struct vport_update_ramrod_mcast *approx_mcast;
1241 
1242 			approx_mcast = &p_ramrod->approx_mcast;
1243 			approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
1244 		}
1245 	}
1246 
1247 	p_ramrod->common.vport_id = abs_vport_id;
1248 
1249 	return qed_spq_post(p_hwfn, p_ent, NULL);
1250 }
1251 
1252 static int qed_filter_mcast_cmd(struct qed_dev *cdev,
1253 				struct qed_filter_mcast *p_filter_cmd,
1254 				enum spq_mode comp_mode,
1255 				struct qed_spq_comp_cb *p_comp_data)
1256 {
1257 	int rc = 0;
1258 	int i;
1259 
1260 	/* only ADD and REMOVE operations are supported for multi-cast */
1261 	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1262 	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
1263 	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1264 		return -EINVAL;
1265 
1266 	for_each_hwfn(cdev, i) {
1267 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1268 
1269 		u16 opaque_fid;
1270 
1271 		if (IS_VF(cdev)) {
1272 			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1273 			continue;
1274 		}
1275 
1276 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1277 
1278 		rc = qed_sp_eth_filter_mcast(p_hwfn,
1279 					     opaque_fid,
1280 					     p_filter_cmd,
1281 					     comp_mode,
1282 					     p_comp_data);
1283 	}
1284 	return rc;
1285 }
1286 
1287 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1288 				struct qed_filter_ucast *p_filter_cmd,
1289 				enum spq_mode comp_mode,
1290 				struct qed_spq_comp_cb *p_comp_data)
1291 {
1292 	int rc = 0;
1293 	int i;
1294 
1295 	for_each_hwfn(cdev, i) {
1296 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1297 		u16 opaque_fid;
1298 
1299 		if (IS_VF(cdev)) {
1300 			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1301 			continue;
1302 		}
1303 
1304 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1305 
1306 		rc = qed_sp_eth_filter_ucast(p_hwfn,
1307 					     opaque_fid,
1308 					     p_filter_cmd,
1309 					     comp_mode,
1310 					     p_comp_data);
1311 		if (rc != 0)
1312 			break;
1313 	}
1314 
1315 	return rc;
1316 }
1317 
1318 /* Statistics related code */
1319 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1320 					   u32 *p_addr,
1321 					   u32 *p_len, u16 statistics_bin)
1322 {
1323 	if (IS_PF(p_hwfn->cdev)) {
1324 		*p_addr = BAR0_MAP_REG_PSDM_RAM +
1325 		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1326 		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
1327 	} else {
1328 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1329 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1330 
1331 		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1332 		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
1333 	}
1334 }
1335 
1336 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
1337 				   struct qed_ptt *p_ptt,
1338 				   struct qed_eth_stats *p_stats,
1339 				   u16 statistics_bin)
1340 {
1341 	struct eth_pstorm_per_queue_stat pstats;
1342 	u32 pstats_addr = 0, pstats_len = 0;
1343 
1344 	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1345 				       statistics_bin);
1346 
1347 	memset(&pstats, 0, sizeof(pstats));
1348 	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1349 
1350 	p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1351 	p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1352 	p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1353 	p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1354 	p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1355 	p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1356 	p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
1357 }
1358 
1359 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
1360 				   struct qed_ptt *p_ptt,
1361 				   struct qed_eth_stats *p_stats,
1362 				   u16 statistics_bin)
1363 {
1364 	struct tstorm_per_port_stat tstats;
1365 	u32 tstats_addr, tstats_len;
1366 
1367 	if (IS_PF(p_hwfn->cdev)) {
1368 		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1369 		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1370 		tstats_len = sizeof(struct tstorm_per_port_stat);
1371 	} else {
1372 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1373 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1374 
1375 		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1376 		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1377 	}
1378 
1379 	memset(&tstats, 0, sizeof(tstats));
1380 	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1381 
1382 	p_stats->mftag_filter_discards +=
1383 		HILO_64_REGPAIR(tstats.mftag_filter_discard);
1384 	p_stats->mac_filter_discards +=
1385 		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1386 }
1387 
1388 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1389 					   u32 *p_addr,
1390 					   u32 *p_len, u16 statistics_bin)
1391 {
1392 	if (IS_PF(p_hwfn->cdev)) {
1393 		*p_addr = BAR0_MAP_REG_USDM_RAM +
1394 		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1395 		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
1396 	} else {
1397 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1398 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1399 
1400 		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1401 		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
1402 	}
1403 }
1404 
1405 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
1406 				   struct qed_ptt *p_ptt,
1407 				   struct qed_eth_stats *p_stats,
1408 				   u16 statistics_bin)
1409 {
1410 	struct eth_ustorm_per_queue_stat ustats;
1411 	u32 ustats_addr = 0, ustats_len = 0;
1412 
1413 	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1414 				       statistics_bin);
1415 
1416 	memset(&ustats, 0, sizeof(ustats));
1417 	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1418 
1419 	p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1420 	p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1421 	p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1422 	p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1423 	p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1424 	p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1425 }
1426 
1427 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1428 					   u32 *p_addr,
1429 					   u32 *p_len, u16 statistics_bin)
1430 {
1431 	if (IS_PF(p_hwfn->cdev)) {
1432 		*p_addr = BAR0_MAP_REG_MSDM_RAM +
1433 		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1434 		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1435 	} else {
1436 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1437 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1438 
1439 		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1440 		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
1441 	}
1442 }
1443 
1444 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1445 				   struct qed_ptt *p_ptt,
1446 				   struct qed_eth_stats *p_stats,
1447 				   u16 statistics_bin)
1448 {
1449 	struct eth_mstorm_per_queue_stat mstats;
1450 	u32 mstats_addr = 0, mstats_len = 0;
1451 
1452 	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1453 				       statistics_bin);
1454 
1455 	memset(&mstats, 0, sizeof(mstats));
1456 	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1457 
1458 	p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
1459 	p_stats->packet_too_big_discard +=
1460 		HILO_64_REGPAIR(mstats.packet_too_big_discard);
1461 	p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1462 	p_stats->tpa_coalesced_pkts +=
1463 		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1464 	p_stats->tpa_coalesced_events +=
1465 		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1466 	p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
1467 	p_stats->tpa_coalesced_bytes +=
1468 		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1469 }
1470 
1471 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1472 				       struct qed_ptt *p_ptt,
1473 				       struct qed_eth_stats *p_stats)
1474 {
1475 	struct port_stats port_stats;
1476 	int j;
1477 
1478 	memset(&port_stats, 0, sizeof(port_stats));
1479 
1480 	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1481 			p_hwfn->mcp_info->port_addr +
1482 			offsetof(struct public_port, stats),
1483 			sizeof(port_stats));
1484 
1485 	p_stats->rx_64_byte_packets		+= port_stats.pmm.r64;
1486 	p_stats->rx_65_to_127_byte_packets	+= port_stats.pmm.r127;
1487 	p_stats->rx_128_to_255_byte_packets	+= port_stats.pmm.r255;
1488 	p_stats->rx_256_to_511_byte_packets	+= port_stats.pmm.r511;
1489 	p_stats->rx_512_to_1023_byte_packets	+= port_stats.pmm.r1023;
1490 	p_stats->rx_1024_to_1518_byte_packets	+= port_stats.pmm.r1518;
1491 	p_stats->rx_1519_to_1522_byte_packets	+= port_stats.pmm.r1522;
1492 	p_stats->rx_1519_to_2047_byte_packets	+= port_stats.pmm.r2047;
1493 	p_stats->rx_2048_to_4095_byte_packets	+= port_stats.pmm.r4095;
1494 	p_stats->rx_4096_to_9216_byte_packets	+= port_stats.pmm.r9216;
1495 	p_stats->rx_9217_to_16383_byte_packets	+= port_stats.pmm.r16383;
1496 	p_stats->rx_crc_errors			+= port_stats.pmm.rfcs;
1497 	p_stats->rx_mac_crtl_frames		+= port_stats.pmm.rxcf;
1498 	p_stats->rx_pause_frames		+= port_stats.pmm.rxpf;
1499 	p_stats->rx_pfc_frames			+= port_stats.pmm.rxpp;
1500 	p_stats->rx_align_errors		+= port_stats.pmm.raln;
1501 	p_stats->rx_carrier_errors		+= port_stats.pmm.rfcr;
1502 	p_stats->rx_oversize_packets		+= port_stats.pmm.rovr;
1503 	p_stats->rx_jabbers			+= port_stats.pmm.rjbr;
1504 	p_stats->rx_undersize_packets		+= port_stats.pmm.rund;
1505 	p_stats->rx_fragments			+= port_stats.pmm.rfrg;
1506 	p_stats->tx_64_byte_packets		+= port_stats.pmm.t64;
1507 	p_stats->tx_65_to_127_byte_packets	+= port_stats.pmm.t127;
1508 	p_stats->tx_128_to_255_byte_packets	+= port_stats.pmm.t255;
1509 	p_stats->tx_256_to_511_byte_packets	+= port_stats.pmm.t511;
1510 	p_stats->tx_512_to_1023_byte_packets	+= port_stats.pmm.t1023;
1511 	p_stats->tx_1024_to_1518_byte_packets	+= port_stats.pmm.t1518;
1512 	p_stats->tx_1519_to_2047_byte_packets	+= port_stats.pmm.t2047;
1513 	p_stats->tx_2048_to_4095_byte_packets	+= port_stats.pmm.t4095;
1514 	p_stats->tx_4096_to_9216_byte_packets	+= port_stats.pmm.t9216;
1515 	p_stats->tx_9217_to_16383_byte_packets	+= port_stats.pmm.t16383;
1516 	p_stats->tx_pause_frames		+= port_stats.pmm.txpf;
1517 	p_stats->tx_pfc_frames			+= port_stats.pmm.txpp;
1518 	p_stats->tx_lpi_entry_count		+= port_stats.pmm.tlpiec;
1519 	p_stats->tx_total_collisions		+= port_stats.pmm.tncl;
1520 	p_stats->rx_mac_bytes			+= port_stats.pmm.rbyte;
1521 	p_stats->rx_mac_uc_packets		+= port_stats.pmm.rxuca;
1522 	p_stats->rx_mac_mc_packets		+= port_stats.pmm.rxmca;
1523 	p_stats->rx_mac_bc_packets		+= port_stats.pmm.rxbca;
1524 	p_stats->rx_mac_frames_ok		+= port_stats.pmm.rxpok;
1525 	p_stats->tx_mac_bytes			+= port_stats.pmm.tbyte;
1526 	p_stats->tx_mac_uc_packets		+= port_stats.pmm.txuca;
1527 	p_stats->tx_mac_mc_packets		+= port_stats.pmm.txmca;
1528 	p_stats->tx_mac_bc_packets		+= port_stats.pmm.txbca;
1529 	p_stats->tx_mac_ctrl_frames		+= port_stats.pmm.txcf;
1530 	for (j = 0; j < 8; j++) {
1531 		p_stats->brb_truncates	+= port_stats.brb.brb_truncate[j];
1532 		p_stats->brb_discards	+= port_stats.brb.brb_discard[j];
1533 	}
1534 }
1535 
1536 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1537 				  struct qed_ptt *p_ptt,
1538 				  struct qed_eth_stats *stats,
1539 				  u16 statistics_bin, bool b_get_port_stats)
1540 {
1541 	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1542 	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1543 	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1544 	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1545 
1546 	if (b_get_port_stats && p_hwfn->mcp_info)
1547 		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1548 }
1549 
1550 static void _qed_get_vport_stats(struct qed_dev *cdev,
1551 				 struct qed_eth_stats *stats)
1552 {
1553 	u8 fw_vport = 0;
1554 	int i;
1555 
1556 	memset(stats, 0, sizeof(*stats));
1557 
1558 	for_each_hwfn(cdev, i) {
1559 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1560 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1561 						    :  NULL;
1562 
1563 		if (IS_PF(cdev)) {
1564 			/* The main vport index is relative first */
1565 			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1566 				DP_ERR(p_hwfn, "No vport available!\n");
1567 				goto out;
1568 			}
1569 		}
1570 
1571 		if (IS_PF(cdev) && !p_ptt) {
1572 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1573 			continue;
1574 		}
1575 
1576 		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1577 				      IS_PF(cdev) ? true : false);
1578 
1579 out:
1580 		if (IS_PF(cdev) && p_ptt)
1581 			qed_ptt_release(p_hwfn, p_ptt);
1582 	}
1583 }
1584 
1585 void qed_get_vport_stats(struct qed_dev *cdev,
1586 			 struct qed_eth_stats *stats)
1587 {
1588 	u32 i;
1589 
1590 	if (!cdev) {
1591 		memset(stats, 0, sizeof(*stats));
1592 		return;
1593 	}
1594 
1595 	_qed_get_vport_stats(cdev, stats);
1596 
1597 	if (!cdev->reset_stats)
1598 		return;
1599 
1600 	/* Reduce the statistics baseline */
1601 	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1602 		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1603 }
1604 
1605 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
1606 void qed_reset_vport_stats(struct qed_dev *cdev)
1607 {
1608 	int i;
1609 
1610 	for_each_hwfn(cdev, i) {
1611 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1612 		struct eth_mstorm_per_queue_stat mstats;
1613 		struct eth_ustorm_per_queue_stat ustats;
1614 		struct eth_pstorm_per_queue_stat pstats;
1615 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1616 						    : NULL;
1617 		u32 addr = 0, len = 0;
1618 
1619 		if (IS_PF(cdev) && !p_ptt) {
1620 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1621 			continue;
1622 		}
1623 
1624 		memset(&mstats, 0, sizeof(mstats));
1625 		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1626 		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1627 
1628 		memset(&ustats, 0, sizeof(ustats));
1629 		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1630 		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1631 
1632 		memset(&pstats, 0, sizeof(pstats));
1633 		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1634 		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1635 
1636 		if (IS_PF(cdev))
1637 			qed_ptt_release(p_hwfn, p_ptt);
1638 	}
1639 
1640 	/* PORT statistics are not necessarily reset, so we need to
1641 	 * read and create a baseline for future statistics.
1642 	 */
1643 	if (!cdev->reset_stats)
1644 		DP_INFO(cdev, "Reset stats not allocated\n");
1645 	else
1646 		_qed_get_vport_stats(cdev, cdev->reset_stats);
1647 }
1648 
1649 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
1650 				 struct qed_dev_eth_info *info)
1651 {
1652 	int i;
1653 
1654 	memset(info, 0, sizeof(*info));
1655 
1656 	info->num_tc = 1;
1657 
1658 	if (IS_PF(cdev)) {
1659 		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
1660 			for_each_hwfn(cdev, i)
1661 			    info->num_queues +=
1662 			    FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
1663 			if (cdev->int_params.fp_msix_cnt)
1664 				info->num_queues =
1665 				    min_t(u8, info->num_queues,
1666 					  cdev->int_params.fp_msix_cnt);
1667 		} else {
1668 			info->num_queues = cdev->num_hwfns;
1669 		}
1670 
1671 		info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
1672 		ether_addr_copy(info->port_mac,
1673 				cdev->hwfns[0].hw_info.hw_mac_addr);
1674 	} else {
1675 		qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
1676 		if (cdev->num_hwfns > 1) {
1677 			u8 queues = 0;
1678 
1679 			qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
1680 			info->num_queues += queues;
1681 		}
1682 
1683 		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
1684 					    &info->num_vlan_filters);
1685 		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
1686 	}
1687 
1688 	qed_fill_dev_info(cdev, &info->common);
1689 
1690 	if (IS_VF(cdev))
1691 		memset(info->common.hw_mac, 0, ETH_ALEN);
1692 
1693 	return 0;
1694 }
1695 
1696 static void qed_register_eth_ops(struct qed_dev *cdev,
1697 				 struct qed_eth_cb_ops *ops, void *cookie)
1698 {
1699 	cdev->protocol_ops.eth = ops;
1700 	cdev->ops_cookie = cookie;
1701 
1702 	/* For VF, we start bulletin reading */
1703 	if (IS_VF(cdev))
1704 		qed_vf_start_iov_wq(cdev);
1705 }
1706 
1707 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
1708 {
1709 	if (IS_PF(cdev))
1710 		return true;
1711 
1712 	return qed_vf_check_mac(&cdev->hwfns[0], mac);
1713 }
1714 
1715 static int qed_start_vport(struct qed_dev *cdev,
1716 			   struct qed_start_vport_params *params)
1717 {
1718 	int rc, i;
1719 
1720 	for_each_hwfn(cdev, i) {
1721 		struct qed_sp_vport_start_params start = { 0 };
1722 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1723 
1724 		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
1725 							QED_TPA_MODE_NONE;
1726 		start.remove_inner_vlan = params->remove_inner_vlan;
1727 		start.only_untagged = true;	/* untagged only */
1728 		start.drop_ttl0 = params->drop_ttl0;
1729 		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
1730 		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
1731 		start.vport_id = params->vport_id;
1732 		start.max_buffers_per_cqe = 16;
1733 		start.mtu = params->mtu;
1734 
1735 		rc = qed_sp_vport_start(p_hwfn, &start);
1736 		if (rc) {
1737 			DP_ERR(cdev, "Failed to start VPORT\n");
1738 			return rc;
1739 		}
1740 
1741 		qed_hw_start_fastpath(p_hwfn);
1742 
1743 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1744 			   "Started V-PORT %d with MTU %d\n",
1745 			   start.vport_id, start.mtu);
1746 	}
1747 
1748 	if (params->clear_stats)
1749 		qed_reset_vport_stats(cdev);
1750 
1751 	return 0;
1752 }
1753 
1754 static int qed_stop_vport(struct qed_dev *cdev,
1755 			  u8 vport_id)
1756 {
1757 	int rc, i;
1758 
1759 	for_each_hwfn(cdev, i) {
1760 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1761 
1762 		rc = qed_sp_vport_stop(p_hwfn,
1763 				       p_hwfn->hw_info.opaque_fid,
1764 				       vport_id);
1765 
1766 		if (rc) {
1767 			DP_ERR(cdev, "Failed to stop VPORT\n");
1768 			return rc;
1769 		}
1770 	}
1771 	return 0;
1772 }
1773 
1774 static int qed_update_vport(struct qed_dev *cdev,
1775 			    struct qed_update_vport_params *params)
1776 {
1777 	struct qed_sp_vport_update_params sp_params;
1778 	struct qed_rss_params sp_rss_params;
1779 	int rc, i;
1780 
1781 	if (!cdev)
1782 		return -ENODEV;
1783 
1784 	memset(&sp_params, 0, sizeof(sp_params));
1785 	memset(&sp_rss_params, 0, sizeof(sp_rss_params));
1786 
1787 	/* Translate protocol params into sp params */
1788 	sp_params.vport_id = params->vport_id;
1789 	sp_params.update_vport_active_rx_flg =
1790 		params->update_vport_active_flg;
1791 	sp_params.update_vport_active_tx_flg =
1792 		params->update_vport_active_flg;
1793 	sp_params.vport_active_rx_flg = params->vport_active_flg;
1794 	sp_params.vport_active_tx_flg = params->vport_active_flg;
1795 	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
1796 	sp_params.tx_switching_flg = params->tx_switching_flg;
1797 	sp_params.accept_any_vlan = params->accept_any_vlan;
1798 	sp_params.update_accept_any_vlan_flg =
1799 		params->update_accept_any_vlan_flg;
1800 
1801 	/* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
1802 	 * We need to re-fix the rss values per engine for CMT.
1803 	 */
1804 	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
1805 		struct qed_update_vport_rss_params *rss =
1806 			&params->rss_params;
1807 		int k, max = 0;
1808 
1809 		/* Find largest entry, since it's possible RSS needs to
1810 		 * be disabled [in case only 1 queue per-hwfn]
1811 		 */
1812 		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
1813 			max = (max > rss->rss_ind_table[k]) ?
1814 				max : rss->rss_ind_table[k];
1815 
1816 		/* Either fix RSS values or disable RSS */
1817 		if (cdev->num_hwfns < max + 1) {
1818 			int divisor = (max + cdev->num_hwfns - 1) /
1819 				cdev->num_hwfns;
1820 
1821 			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1822 				   "CMT - fixing RSS values (modulo %02x)\n",
1823 				   divisor);
1824 
1825 			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
1826 				rss->rss_ind_table[k] =
1827 					rss->rss_ind_table[k] % divisor;
1828 		} else {
1829 			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1830 				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
1831 			params->update_rss_flg = 0;
1832 		}
1833 	}
1834 
1835 	/* Now, update the RSS configuration for actual configuration */
1836 	if (params->update_rss_flg) {
1837 		sp_rss_params.update_rss_config = 1;
1838 		sp_rss_params.rss_enable = 1;
1839 		sp_rss_params.update_rss_capabilities = 1;
1840 		sp_rss_params.update_rss_ind_table = 1;
1841 		sp_rss_params.update_rss_key = 1;
1842 		sp_rss_params.rss_caps = params->rss_params.rss_caps;
1843 		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
1844 		memcpy(sp_rss_params.rss_ind_table,
1845 		       params->rss_params.rss_ind_table,
1846 		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
1847 		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
1848 		       QED_RSS_KEY_SIZE * sizeof(u32));
1849 	}
1850 	sp_params.rss_params = &sp_rss_params;
1851 
1852 	for_each_hwfn(cdev, i) {
1853 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1854 
1855 		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1856 		rc = qed_sp_vport_update(p_hwfn, &sp_params,
1857 					 QED_SPQ_MODE_EBLOCK,
1858 					 NULL);
1859 		if (rc) {
1860 			DP_ERR(cdev, "Failed to update VPORT\n");
1861 			return rc;
1862 		}
1863 
1864 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1865 			   "Updated V-PORT %d: active_flag %d [update %d]\n",
1866 			   params->vport_id, params->vport_active_flg,
1867 			   params->update_vport_active_flg);
1868 	}
1869 
1870 	return 0;
1871 }
1872 
1873 static int qed_start_rxq(struct qed_dev *cdev,
1874 			 struct qed_queue_start_common_params *params,
1875 			 u16 bd_max_bytes,
1876 			 dma_addr_t bd_chain_phys_addr,
1877 			 dma_addr_t cqe_pbl_addr,
1878 			 u16 cqe_pbl_size,
1879 			 void __iomem **pp_prod)
1880 {
1881 	int rc, hwfn_index;
1882 	struct qed_hwfn *p_hwfn;
1883 
1884 	hwfn_index = params->rss_id % cdev->num_hwfns;
1885 	p_hwfn = &cdev->hwfns[hwfn_index];
1886 
1887 	/* Fix queue ID in 100g mode */
1888 	params->queue_id /= cdev->num_hwfns;
1889 
1890 	rc = qed_sp_eth_rx_queue_start(p_hwfn,
1891 				       p_hwfn->hw_info.opaque_fid,
1892 				       params,
1893 				       bd_max_bytes,
1894 				       bd_chain_phys_addr,
1895 				       cqe_pbl_addr,
1896 				       cqe_pbl_size,
1897 				       pp_prod);
1898 
1899 	if (rc) {
1900 		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
1901 		return rc;
1902 	}
1903 
1904 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1905 		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1906 		   params->queue_id, params->rss_id, params->vport_id,
1907 		   params->sb);
1908 
1909 	return 0;
1910 }
1911 
1912 static int qed_stop_rxq(struct qed_dev *cdev,
1913 			struct qed_stop_rxq_params *params)
1914 {
1915 	int rc, hwfn_index;
1916 	struct qed_hwfn *p_hwfn;
1917 
1918 	hwfn_index	= params->rss_id % cdev->num_hwfns;
1919 	p_hwfn		= &cdev->hwfns[hwfn_index];
1920 
1921 	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1922 				      params->rx_queue_id / cdev->num_hwfns,
1923 				      params->eq_completion_only,
1924 				      false);
1925 	if (rc) {
1926 		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
1927 		return rc;
1928 	}
1929 
1930 	return 0;
1931 }
1932 
1933 static int qed_start_txq(struct qed_dev *cdev,
1934 			 struct qed_queue_start_common_params *p_params,
1935 			 dma_addr_t pbl_addr,
1936 			 u16 pbl_size,
1937 			 void __iomem **pp_doorbell)
1938 {
1939 	struct qed_hwfn *p_hwfn;
1940 	int rc, hwfn_index;
1941 
1942 	hwfn_index	= p_params->rss_id % cdev->num_hwfns;
1943 	p_hwfn		= &cdev->hwfns[hwfn_index];
1944 
1945 	/* Fix queue ID in 100g mode */
1946 	p_params->queue_id /= cdev->num_hwfns;
1947 
1948 	rc = qed_sp_eth_tx_queue_start(p_hwfn,
1949 				       p_hwfn->hw_info.opaque_fid,
1950 				       p_params,
1951 				       pbl_addr,
1952 				       pbl_size,
1953 				       pp_doorbell);
1954 
1955 	if (rc) {
1956 		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
1957 		return rc;
1958 	}
1959 
1960 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1961 		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1962 		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
1963 		   p_params->sb);
1964 
1965 	return 0;
1966 }
1967 
1968 #define QED_HW_STOP_RETRY_LIMIT (10)
1969 static int qed_fastpath_stop(struct qed_dev *cdev)
1970 {
1971 	qed_hw_stop_fastpath(cdev);
1972 
1973 	return 0;
1974 }
1975 
1976 static int qed_stop_txq(struct qed_dev *cdev,
1977 			struct qed_stop_txq_params *params)
1978 {
1979 	struct qed_hwfn *p_hwfn;
1980 	int rc, hwfn_index;
1981 
1982 	hwfn_index	= params->rss_id % cdev->num_hwfns;
1983 	p_hwfn		= &cdev->hwfns[hwfn_index];
1984 
1985 	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1986 				      params->tx_queue_id / cdev->num_hwfns);
1987 	if (rc) {
1988 		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
1989 		return rc;
1990 	}
1991 
1992 	return 0;
1993 }
1994 
1995 static int qed_tunn_configure(struct qed_dev *cdev,
1996 			      struct qed_tunn_params *tunn_params)
1997 {
1998 	struct qed_tunn_update_params tunn_info;
1999 	int i, rc;
2000 
2001 	if (IS_VF(cdev))
2002 		return 0;
2003 
2004 	memset(&tunn_info, 0, sizeof(tunn_info));
2005 	if (tunn_params->update_vxlan_port == 1) {
2006 		tunn_info.update_vxlan_udp_port = 1;
2007 		tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
2008 	}
2009 
2010 	if (tunn_params->update_geneve_port == 1) {
2011 		tunn_info.update_geneve_udp_port = 1;
2012 		tunn_info.geneve_udp_port = tunn_params->geneve_port;
2013 	}
2014 
2015 	for_each_hwfn(cdev, i) {
2016 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
2017 
2018 		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
2019 					       QED_SPQ_MODE_EBLOCK, NULL);
2020 
2021 		if (rc)
2022 			return rc;
2023 	}
2024 
2025 	return 0;
2026 }
2027 
2028 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2029 					enum qed_filter_rx_mode_type type)
2030 {
2031 	struct qed_filter_accept_flags accept_flags;
2032 
2033 	memset(&accept_flags, 0, sizeof(accept_flags));
2034 
2035 	accept_flags.update_rx_mode_config	= 1;
2036 	accept_flags.update_tx_mode_config	= 1;
2037 	accept_flags.rx_accept_filter		= QED_ACCEPT_UCAST_MATCHED |
2038 						  QED_ACCEPT_MCAST_MATCHED |
2039 						  QED_ACCEPT_BCAST;
2040 	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2041 					QED_ACCEPT_MCAST_MATCHED |
2042 					QED_ACCEPT_BCAST;
2043 
2044 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
2045 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2046 						 QED_ACCEPT_MCAST_UNMATCHED;
2047 	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
2048 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2049 
2050 	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
2051 				     QED_SPQ_MODE_CB, NULL);
2052 }
2053 
2054 static int qed_configure_filter_ucast(struct qed_dev *cdev,
2055 				      struct qed_filter_ucast_params *params)
2056 {
2057 	struct qed_filter_ucast ucast;
2058 
2059 	if (!params->vlan_valid && !params->mac_valid) {
2060 		DP_NOTICE(
2061 			cdev,
2062 			"Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
2063 		return -EINVAL;
2064 	}
2065 
2066 	memset(&ucast, 0, sizeof(ucast));
2067 	switch (params->type) {
2068 	case QED_FILTER_XCAST_TYPE_ADD:
2069 		ucast.opcode = QED_FILTER_ADD;
2070 		break;
2071 	case QED_FILTER_XCAST_TYPE_DEL:
2072 		ucast.opcode = QED_FILTER_REMOVE;
2073 		break;
2074 	case QED_FILTER_XCAST_TYPE_REPLACE:
2075 		ucast.opcode = QED_FILTER_REPLACE;
2076 		break;
2077 	default:
2078 		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
2079 			  params->type);
2080 	}
2081 
2082 	if (params->vlan_valid && params->mac_valid) {
2083 		ucast.type = QED_FILTER_MAC_VLAN;
2084 		ether_addr_copy(ucast.mac, params->mac);
2085 		ucast.vlan = params->vlan;
2086 	} else if (params->mac_valid) {
2087 		ucast.type = QED_FILTER_MAC;
2088 		ether_addr_copy(ucast.mac, params->mac);
2089 	} else {
2090 		ucast.type = QED_FILTER_VLAN;
2091 		ucast.vlan = params->vlan;
2092 	}
2093 
2094 	ucast.is_rx_filter = true;
2095 	ucast.is_tx_filter = true;
2096 
2097 	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2098 }
2099 
2100 static int qed_configure_filter_mcast(struct qed_dev *cdev,
2101 				      struct qed_filter_mcast_params *params)
2102 {
2103 	struct qed_filter_mcast mcast;
2104 	int i;
2105 
2106 	memset(&mcast, 0, sizeof(mcast));
2107 	switch (params->type) {
2108 	case QED_FILTER_XCAST_TYPE_ADD:
2109 		mcast.opcode = QED_FILTER_ADD;
2110 		break;
2111 	case QED_FILTER_XCAST_TYPE_DEL:
2112 		mcast.opcode = QED_FILTER_REMOVE;
2113 		break;
2114 	default:
2115 		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
2116 			  params->type);
2117 	}
2118 
2119 	mcast.num_mc_addrs = params->num;
2120 	for (i = 0; i < mcast.num_mc_addrs; i++)
2121 		ether_addr_copy(mcast.mac[i], params->mac[i]);
2122 
2123 	return qed_filter_mcast_cmd(cdev, &mcast,
2124 				    QED_SPQ_MODE_CB, NULL);
2125 }
2126 
2127 static int qed_configure_filter(struct qed_dev *cdev,
2128 				struct qed_filter_params *params)
2129 {
2130 	enum qed_filter_rx_mode_type accept_flags;
2131 
2132 	switch (params->type) {
2133 	case QED_FILTER_TYPE_UCAST:
2134 		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2135 	case QED_FILTER_TYPE_MCAST:
2136 		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2137 	case QED_FILTER_TYPE_RX_MODE:
2138 		accept_flags = params->filter.accept_flags;
2139 		return qed_configure_filter_rx_mode(cdev, accept_flags);
2140 	default:
2141 		DP_NOTICE(cdev, "Unknown filter type %d\n",
2142 			  (int)params->type);
2143 		return -EINVAL;
2144 	}
2145 }
2146 
2147 static int qed_fp_cqe_completion(struct qed_dev *dev,
2148 				 u8 rss_id,
2149 				 struct eth_slow_path_rx_cqe *cqe)
2150 {
2151 	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2152 				      cqe);
2153 }
2154 
2155 #ifdef CONFIG_QED_SRIOV
2156 extern const struct qed_iov_hv_ops qed_iov_ops_pass;
2157 #endif
2158 
2159 static const struct qed_eth_ops qed_eth_ops_pass = {
2160 	.common = &qed_common_ops_pass,
2161 #ifdef CONFIG_QED_SRIOV
2162 	.iov = &qed_iov_ops_pass,
2163 #endif
2164 	.fill_dev_info = &qed_fill_eth_dev_info,
2165 	.register_ops = &qed_register_eth_ops,
2166 	.check_mac = &qed_check_mac,
2167 	.vport_start = &qed_start_vport,
2168 	.vport_stop = &qed_stop_vport,
2169 	.vport_update = &qed_update_vport,
2170 	.q_rx_start = &qed_start_rxq,
2171 	.q_rx_stop = &qed_stop_rxq,
2172 	.q_tx_start = &qed_start_txq,
2173 	.q_tx_stop = &qed_stop_txq,
2174 	.filter_config = &qed_configure_filter,
2175 	.fastpath_stop = &qed_fastpath_stop,
2176 	.eth_cqe_completion = &qed_fp_cqe_completion,
2177 	.get_vport_stats = &qed_get_vport_stats,
2178 	.tunn_config = &qed_tunn_configure,
2179 };
2180 
2181 const struct qed_eth_ops *qed_get_eth_ops(void)
2182 {
2183 	return &qed_eth_ops_pass;
2184 }
2185 EXPORT_SYMBOL(qed_get_eth_ops);
2186 
2187 void qed_put_eth_ops(void)
2188 {
2189 	/* TODO - reference count for module? */
2190 }
2191 EXPORT_SYMBOL(qed_put_eth_ops);
2192