1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <asm/param.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/etherdevice.h>
15 #include <linux/interrupt.h>
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/pci.h>
19 #include <linux/slab.h>
20 #include <linux/stddef.h>
21 #include <linux/string.h>
22 #include <linux/version.h>
23 #include <linux/workqueue.h>
24 #include <linux/bitops.h>
25 #include <linux/bug.h>
26 #include "qed.h"
27 #include <linux/qed/qed_chain.h>
28 #include "qed_cxt.h"
29 #include "qed_dev_api.h"
30 #include <linux/qed/qed_eth_if.h>
31 #include "qed_hsi.h"
32 #include "qed_hw.h"
33 #include "qed_int.h"
34 #include "qed_mcp.h"
35 #include "qed_reg_addr.h"
36 #include "qed_sp.h"
37 
38 enum qed_rss_caps {
39 	QED_RSS_IPV4		= 0x1,
40 	QED_RSS_IPV6		= 0x2,
41 	QED_RSS_IPV4_TCP	= 0x4,
42 	QED_RSS_IPV6_TCP	= 0x8,
43 	QED_RSS_IPV4_UDP	= 0x10,
44 	QED_RSS_IPV6_UDP	= 0x20,
45 };
46 
47 /* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
48 #define QED_RSS_IND_TABLE_SIZE 128
49 #define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
50 
51 struct qed_rss_params {
52 	u8	update_rss_config;
53 	u8	rss_enable;
54 	u8	rss_eng_id;
55 	u8	update_rss_capabilities;
56 	u8	update_rss_ind_table;
57 	u8	update_rss_key;
58 	u8	rss_caps;
59 	u8	rss_table_size_log;
60 	u16	rss_ind_table[QED_RSS_IND_TABLE_SIZE];
61 	u32	rss_key[QED_RSS_KEY_SIZE];
62 };
63 
64 enum qed_filter_opcode {
65 	QED_FILTER_ADD,
66 	QED_FILTER_REMOVE,
67 	QED_FILTER_MOVE,
68 	QED_FILTER_REPLACE,     /* Delete all MACs and add a new one instead */
69 	QED_FILTER_FLUSH,       /* Removes all filters */
70 };
71 
72 enum qed_filter_ucast_type {
73 	QED_FILTER_MAC,
74 	QED_FILTER_VLAN,
75 	QED_FILTER_MAC_VLAN,
76 	QED_FILTER_INNER_MAC,
77 	QED_FILTER_INNER_VLAN,
78 	QED_FILTER_INNER_PAIR,
79 	QED_FILTER_INNER_MAC_VNI_PAIR,
80 	QED_FILTER_MAC_VNI_PAIR,
81 	QED_FILTER_VNI,
82 };
83 
84 struct qed_filter_ucast {
85 	enum qed_filter_opcode		opcode;
86 	enum qed_filter_ucast_type	type;
87 	u8				is_rx_filter;
88 	u8				is_tx_filter;
89 	u8				vport_to_add_to;
90 	u8				vport_to_remove_from;
91 	unsigned char			mac[ETH_ALEN];
92 	u8				assert_on_error;
93 	u16				vlan;
94 	u32				vni;
95 };
96 
97 struct qed_filter_mcast {
98 	/* MOVE is not supported for multicast */
99 	enum qed_filter_opcode	opcode;
100 	u8			vport_to_add_to;
101 	u8			vport_to_remove_from;
102 	u8			num_mc_addrs;
103 #define QED_MAX_MC_ADDRS        64
104 	unsigned char		mac[QED_MAX_MC_ADDRS][ETH_ALEN];
105 };
106 
107 struct qed_filter_accept_flags {
108 	u8	update_rx_mode_config;
109 	u8	update_tx_mode_config;
110 	u8	rx_accept_filter;
111 	u8	tx_accept_filter;
112 #define QED_ACCEPT_NONE         0x01
113 #define QED_ACCEPT_UCAST_MATCHED        0x02
114 #define QED_ACCEPT_UCAST_UNMATCHED      0x04
115 #define QED_ACCEPT_MCAST_MATCHED        0x08
116 #define QED_ACCEPT_MCAST_UNMATCHED      0x10
117 #define QED_ACCEPT_BCAST                0x20
118 };
119 
120 struct qed_sp_vport_update_params {
121 	u16				opaque_fid;
122 	u8				vport_id;
123 	u8				update_vport_active_rx_flg;
124 	u8				vport_active_rx_flg;
125 	u8				update_vport_active_tx_flg;
126 	u8				vport_active_tx_flg;
127 	u8				update_approx_mcast_flg;
128 	u8				update_accept_any_vlan_flg;
129 	u8				accept_any_vlan;
130 	unsigned long			bins[8];
131 	struct qed_rss_params		*rss_params;
132 	struct qed_filter_accept_flags	accept_flags;
133 };
134 
135 #define QED_MAX_SGES_NUM 16
136 #define CRC32_POLY 0x1edc6f41
137 
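/* Send a VPORT_START ramrod for the given vport. The initial Rx mode drops
 * all unicast and multicast traffic; a later vport-update or filter command
 * is expected to open it up. TPA is left disabled at this stage.
 */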
138 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
139 			      u32 concrete_fid,
140 			      u16 opaque_fid,
141 			      u8 vport_id,
142 			      u16 mtu,
143 			      u8 drop_ttl0_flg,
144 			      u8 inner_vlan_removal_en_flg)
145 {
146 	struct vport_start_ramrod_data *p_ramrod = NULL;
147 	struct qed_spq_entry *p_ent =  NULL;
148 	struct qed_sp_init_data init_data;
149 	int rc = -EINVAL;
150 	u16 rx_mode = 0;
151 	u8 abs_vport_id = 0;
152 
153 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
154 	if (rc != 0)
155 		return rc;
156 
157 	memset(&init_data, 0, sizeof(init_data));
158 	init_data.cid = qed_spq_get_cid(p_hwfn);
159 	init_data.opaque_fid = opaque_fid;
160 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
161 
162 	rc = qed_sp_init_request(p_hwfn, &p_ent,
163 				 ETH_RAMROD_VPORT_START,
164 				 PROTOCOLID_ETH, &init_data);
165 	if (rc)
166 		return rc;
167 
168 	p_ramrod		= &p_ent->ramrod.vport_start;
169 	p_ramrod->vport_id	= abs_vport_id;
170 
171 	p_ramrod->mtu			= cpu_to_le16(mtu);
172 	p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg;
173 	p_ramrod->drop_ttl0_en		= drop_ttl0_flg;
174 
175 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
176 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
177 
178 	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
179 
180 	/* TPA related fields */
181 	memset(&p_ramrod->tpa_param, 0,
182 	       sizeof(struct eth_vport_tpa_param));
183 
184 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
185 	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
186 						  concrete_fid);
187 
188 	return qed_spq_post(p_hwfn, p_ent, NULL);
189 }
190 
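/* Fill the RSS section of a vport-update ramrod from @p_params. If no RSS
 * parameters are supplied, the update flag is simply cleared. Indirection
 * table entries are translated from relative to absolute L2 queue IDs.
 */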
191 static int
192 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
193 			struct vport_update_ramrod_data *p_ramrod,
194 			struct qed_rss_params *p_params)
195 {
196 	struct eth_vport_rss_config *rss = &p_ramrod->rss_config;
197 	u16 abs_l2_queue = 0, capabilities = 0;
198 	int rc = 0, i;
199 
200 	if (!p_params) {
201 		p_ramrod->common.update_rss_flg = 0;
202 		return rc;
203 	}
204 
205 	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE !=
206 		     ETH_RSS_IND_TABLE_ENTRIES_NUM);
207 
208 	rc = qed_fw_rss_eng(p_hwfn, p_params->rss_eng_id, &rss->rss_id);
209 	if (rc)
210 		return rc;
211 
212 	p_ramrod->common.update_rss_flg = p_params->update_rss_config;
213 	rss->update_rss_capabilities = p_params->update_rss_capabilities;
214 	rss->update_rss_ind_table = p_params->update_rss_ind_table;
215 	rss->update_rss_key = p_params->update_rss_key;
216 
217 	rss->rss_mode = p_params->rss_enable ?
218 			ETH_VPORT_RSS_MODE_REGULAR :
219 			ETH_VPORT_RSS_MODE_DISABLED;
220 
221 	SET_FIELD(capabilities,
222 		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
223 		  !!(p_params->rss_caps & QED_RSS_IPV4));
224 	SET_FIELD(capabilities,
225 		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
226 		  !!(p_params->rss_caps & QED_RSS_IPV6));
227 	SET_FIELD(capabilities,
228 		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
229 		  !!(p_params->rss_caps & QED_RSS_IPV4_TCP));
230 	SET_FIELD(capabilities,
231 		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
232 		  !!(p_params->rss_caps & QED_RSS_IPV6_TCP));
233 	SET_FIELD(capabilities,
234 		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
235 		  !!(p_params->rss_caps & QED_RSS_IPV4_UDP));
236 	SET_FIELD(capabilities,
237 		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
238 		  !!(p_params->rss_caps & QED_RSS_IPV6_UDP));
239 	rss->tbl_size = p_params->rss_table_size_log;
240 
241 	rss->capabilities = cpu_to_le16(capabilities);
242 
243 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
244 		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
245 		   p_ramrod->common.update_rss_flg,
246 		   rss->rss_mode, rss->update_rss_capabilities,
247 		   capabilities, rss->update_rss_ind_table,
248 		   rss->update_rss_key);
249 
250 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
251 		rc = qed_fw_l2_queue(p_hwfn,
252 				     (u8)p_params->rss_ind_table[i],
253 				     &abs_l2_queue);
254 		if (rc)
255 			return rc;
256 
257 		rss->indirection_table[i] = cpu_to_le16(abs_l2_queue);
258 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "i= %d, queue = %d\n",
259 			   i, rss->indirection_table[i]);
260 	}
261 
262 	for (i = 0; i < 10; i++)
263 		rss->rss_key[i] = cpu_to_le32(p_params->rss_key[i]);
264 
265 	return rc;
266 }
267 
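/* Translate the driver-level accept flags into the FW Rx/Tx mode state
 * fields. On the Rx side, DROP_ALL is set only when neither the matched nor
 * the unmatched variant of a traffic class is accepted; on the Tx side it is
 * driven by QED_ACCEPT_NONE.
 */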
268 static void
269 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
270 			  struct vport_update_ramrod_data *p_ramrod,
271 			  struct qed_filter_accept_flags accept_flags)
272 {
273 	p_ramrod->common.update_rx_mode_flg =
274 		accept_flags.update_rx_mode_config;
275 
276 	p_ramrod->common.update_tx_mode_flg =
277 		accept_flags.update_tx_mode_config;
278 
279 	/* Set Rx mode accept flags */
280 	if (p_ramrod->common.update_rx_mode_flg) {
281 		u8 accept_filter = accept_flags.rx_accept_filter;
282 		u16 state = 0;
283 
284 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
285 			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
286 			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
287 
288 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
289 			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
290 
291 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
292 			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
293 			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
294 
295 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
296 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
297 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
298 
299 		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
300 			  !!(accept_filter & QED_ACCEPT_BCAST));
301 
302 		p_ramrod->rx_mode.state = cpu_to_le16(state);
303 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
304 			   "p_ramrod->rx_mode.state = 0x%x\n", state);
305 	}
306 
307 	/* Set Tx mode accept flags */
308 	if (p_ramrod->common.update_tx_mode_flg) {
309 		u8 accept_filter = accept_flags.tx_accept_filter;
310 		u16 state = 0;
311 
312 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
313 			  !!(accept_filter & QED_ACCEPT_NONE));
314 
315 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
316 			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
317 			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
318 
319 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
320 			  !!(accept_filter & QED_ACCEPT_NONE));
321 
322 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
323 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
324 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
325 
326 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
327 			  !!(accept_filter & QED_ACCEPT_BCAST));
328 
329 		p_ramrod->tx_mode.state = cpu_to_le16(state);
330 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
331 			   "p_ramrod->tx_mode.state = 0x%x\n", state);
332 	}
333 }
334 
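/* Copy the approximate-multicast bin vector into the ramrod, converting each
 * register-sized chunk to little-endian. The vector is cleared
 * unconditionally but populated only when a multicast update was requested.
 */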
335 static void
336 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
337 			struct vport_update_ramrod_data *p_ramrod,
338 			struct qed_sp_vport_update_params *p_params)
339 {
340 	int i;
341 
342 	memset(&p_ramrod->approx_mcast.bins, 0,
343 	       sizeof(p_ramrod->approx_mcast.bins));
344 
345 	if (p_params->update_approx_mcast_flg) {
346 		p_ramrod->common.update_approx_mcast_flg = 1;
347 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
348 			u32 *p_bins = (u32 *)p_params->bins;
349 			__le32 val = cpu_to_le32(p_bins[i]);
350 
351 			p_ramrod->approx_mcast.bins[i] = val;
352 		}
353 	}
354 }
355 
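/* Build and post a VPORT_UPDATE ramrod covering activity flags,
 * accept-any-vlan, RSS, multicast bins and the Rx/Tx accept modes. If the
 * RSS translation fails, the already-acquired SPQ entry is returned before
 * bailing out.
 */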
356 static int
357 qed_sp_vport_update(struct qed_hwfn *p_hwfn,
358 		    struct qed_sp_vport_update_params *p_params,
359 		    enum spq_mode comp_mode,
360 		    struct qed_spq_comp_cb *p_comp_data)
361 {
362 	struct qed_rss_params *p_rss_params = p_params->rss_params;
363 	struct vport_update_ramrod_data_cmn *p_cmn;
364 	struct qed_sp_init_data init_data;
365 	struct vport_update_ramrod_data *p_ramrod = NULL;
366 	struct qed_spq_entry *p_ent = NULL;
367 	u8 abs_vport_id = 0;
368 	int rc = -EINVAL;
369 
370 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
371 	if (rc != 0)
372 		return rc;
373 
374 	memset(&init_data, 0, sizeof(init_data));
375 	init_data.cid = qed_spq_get_cid(p_hwfn);
376 	init_data.opaque_fid = p_params->opaque_fid;
377 	init_data.comp_mode = comp_mode;
378 	init_data.p_comp_data = p_comp_data;
379 
380 	rc = qed_sp_init_request(p_hwfn, &p_ent,
381 				 ETH_RAMROD_VPORT_UPDATE,
382 				 PROTOCOLID_ETH, &init_data);
383 	if (rc)
384 		return rc;
385 
386 	/* Copy input params to ramrod according to FW struct */
387 	p_ramrod = &p_ent->ramrod.vport_update;
388 	p_cmn = &p_ramrod->common;
389 
390 	p_cmn->vport_id = abs_vport_id;
391 	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
392 	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
393 	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
394 	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
395 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
396 	p_cmn->update_accept_any_vlan_flg =
397 			p_params->update_accept_any_vlan_flg;
398 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
399 	if (rc) {
400 		/* Return the SPQ entry that was acquired in qed_sp_init_request() */
401 		qed_spq_return_entry(p_hwfn, p_ent);
402 		return rc;
403 	}
404 
405 	/* Update mcast bins for VFs; the PF doesn't use this functionality */
406 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
407 
408 	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
409 	return qed_spq_post(p_hwfn, p_ent, NULL);
410 }
411 
412 static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
413 			     u16 opaque_fid,
414 			     u8 vport_id)
415 {
416 	struct vport_stop_ramrod_data *p_ramrod;
417 	struct qed_sp_init_data init_data;
418 	struct qed_spq_entry *p_ent;
419 	u8 abs_vport_id = 0;
420 	int rc;
421 
422 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
423 	if (rc != 0)
424 		return rc;
425 
426 	memset(&init_data, 0, sizeof(init_data));
427 	init_data.cid = qed_spq_get_cid(p_hwfn);
428 	init_data.opaque_fid = opaque_fid;
429 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
430 
431 	rc = qed_sp_init_request(p_hwfn, &p_ent,
432 				 ETH_RAMROD_VPORT_STOP,
433 				 PROTOCOLID_ETH, &init_data);
434 	if (rc)
435 		return rc;
436 
437 	p_ramrod = &p_ent->ramrod.vport_stop;
438 	p_ramrod->vport_id = abs_vport_id;
439 
440 	return qed_spq_post(p_hwfn, p_ent, NULL);
441 }
442 
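/* Configure the Rx/Tx accept mode (and optionally accept-any-vlan) on the
 * given vport, issuing a vport-update ramrod on every hwfn.
 */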
443 static int qed_filter_accept_cmd(struct qed_dev *cdev,
444 				 u8 vport,
445 				 struct qed_filter_accept_flags accept_flags,
446 				 u8 update_accept_any_vlan,
447 				 u8 accept_any_vlan,
448 				enum spq_mode comp_mode,
449 				struct qed_spq_comp_cb *p_comp_data)
450 {
451 	struct qed_sp_vport_update_params vport_update_params;
452 	int i, rc;
453 
454 	/* Prepare and send the vport rx_mode change */
455 	memset(&vport_update_params, 0, sizeof(vport_update_params));
456 	vport_update_params.vport_id = vport;
457 	vport_update_params.accept_flags = accept_flags;
458 	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
459 	vport_update_params.accept_any_vlan = accept_any_vlan;
460 
461 	for_each_hwfn(cdev, i) {
462 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
463 
464 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
465 
466 		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
467 					 comp_mode, p_comp_data);
468 		if (rc != 0) {
469 			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
470 			return rc;
471 		}
472 
473 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
474 			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
475 			   accept_flags.rx_accept_filter,
476 			   accept_flags.tx_accept_filter);
477 		if (update_accept_any_vlan)
478 			DP_VERBOSE(p_hwfn, QED_MSG_SP,
479 				   "accept_any_vlan=%d configured\n",
480 				   accept_any_vlan);
481 	}
482 
483 	return 0;
484 }
485 
486 static int qed_sp_release_queue_cid(
487 	struct qed_hwfn *p_hwfn,
488 	struct qed_hw_cid_data *p_cid_data)
489 {
490 	if (!p_cid_data->b_cid_allocated)
491 		return 0;
492 
493 	qed_cxt_release_cid(p_hwfn, p_cid_data->cid);
494 
495 	p_cid_data->b_cid_allocated = false;
496 
497 	return 0;
498 }
499 
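/* Send the RX_QUEUE_START ramrod for an already-acquired CID. Queue and
 * vport IDs are translated to their absolute (FW) values, and the BD chain
 * and CQE PBL addresses are passed on to the firmware.
 */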
500 static int
501 qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
502 			    u16 opaque_fid,
503 			    u32 cid,
504 			    struct qed_queue_start_common_params *params,
505 			    u8 stats_id,
506 			    u16 bd_max_bytes,
507 			    dma_addr_t bd_chain_phys_addr,
508 			    dma_addr_t cqe_pbl_addr,
509 			    u16 cqe_pbl_size)
510 {
511 	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
512 	struct qed_spq_entry *p_ent = NULL;
513 	struct qed_sp_init_data init_data;
514 	struct qed_hw_cid_data *p_rx_cid;
515 	u16 abs_rx_q_id = 0;
516 	u8 abs_vport_id = 0;
517 	int rc = -EINVAL;
518 
519 	/* Store information for the stop */
520 	p_rx_cid		= &p_hwfn->p_rx_cids[params->queue_id];
521 	p_rx_cid->cid		= cid;
522 	p_rx_cid->opaque_fid	= opaque_fid;
523 	p_rx_cid->vport_id	= params->vport_id;
524 
525 	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_vport_id);
526 	if (rc != 0)
527 		return rc;
528 
529 	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_rx_q_id);
530 	if (rc != 0)
531 		return rc;
532 
533 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
534 		   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
535 		   opaque_fid, cid, params->queue_id, params->vport_id,
536 		   params->sb);
537 
538 	/* Get SPQ entry */
539 	memset(&init_data, 0, sizeof(init_data));
540 	init_data.cid = cid;
541 	init_data.opaque_fid = opaque_fid;
542 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
543 
544 	rc = qed_sp_init_request(p_hwfn, &p_ent,
545 				 ETH_RAMROD_RX_QUEUE_START,
546 				 PROTOCOLID_ETH, &init_data);
547 	if (rc)
548 		return rc;
549 
550 	p_ramrod = &p_ent->ramrod.rx_queue_start;
551 
552 	p_ramrod->sb_id			= cpu_to_le16(params->sb);
553 	p_ramrod->sb_index		= params->sb_idx;
554 	p_ramrod->vport_id		= abs_vport_id;
555 	p_ramrod->stats_counter_id	= stats_id;
556 	p_ramrod->rx_queue_id		= cpu_to_le16(abs_rx_q_id);
557 	p_ramrod->complete_cqe_flg	= 0;
558 	p_ramrod->complete_event_flg	= 1;
559 
560 	p_ramrod->bd_max_bytes	= cpu_to_le16(bd_max_bytes);
561 	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
562 
563 	p_ramrod->num_of_pbl_pages	= cpu_to_le16(cqe_pbl_size);
564 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
565 
566 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
567 
568 	return rc;
569 }
570 
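/* Higher-level Rx queue start: map the queue's producer in the internal RAM
 * and reset it to zero, acquire a CID for the queue and issue the start
 * ramrod. The CID is released again if the ramrod fails.
 */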
571 static int
572 qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
573 			  u16 opaque_fid,
574 			  struct qed_queue_start_common_params *params,
575 			  u16 bd_max_bytes,
576 			  dma_addr_t bd_chain_phys_addr,
577 			  dma_addr_t cqe_pbl_addr,
578 			  u16 cqe_pbl_size,
579 			  void __iomem **pp_prod)
580 {
581 	struct qed_hw_cid_data *p_rx_cid;
582 	u64 init_prod_val = 0;
583 	u16 abs_l2_queue = 0;
584 	u8 abs_stats_id = 0;
585 	int rc;
586 
587 	rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
588 	if (rc != 0)
589 		return rc;
590 
591 	rc = qed_fw_vport(p_hwfn, params->vport_id, &abs_stats_id);
592 	if (rc != 0)
593 		return rc;
594 
595 	*pp_prod = (u8 __iomem *)p_hwfn->regview +
596 				 GTT_BAR0_MAP_REG_MSDM_RAM +
597 				 MSTORM_PRODS_OFFSET(abs_l2_queue);
598 
599 	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
600 	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
601 			  (u32 *)(&init_prod_val));
602 
603 	/* Allocate a CID for the queue */
604 	p_rx_cid = &p_hwfn->p_rx_cids[params->queue_id];
605 	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
606 				 &p_rx_cid->cid);
607 	if (rc) {
608 		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
609 		return rc;
610 	}
611 	p_rx_cid->b_cid_allocated = true;
612 
613 	rc = qed_sp_eth_rxq_start_ramrod(p_hwfn,
614 					 opaque_fid,
615 					 p_rx_cid->cid,
616 					 params,
617 					 abs_stats_id,
618 					 bd_max_bytes,
619 					 bd_chain_phys_addr,
620 					 cqe_pbl_addr,
621 					 cqe_pbl_size);
622 
623 	if (rc != 0)
624 		qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
625 
626 	return rc;
627 }
628 
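/* Stop an Rx queue. The completion flags are chosen so that a PF-owned queue
 * completes with a CQE (unless EQ-only completion was requested) while a
 * VF-owned queue completes through the event queue; the CID is released
 * afterwards.
 */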
629 static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
630 				    u16 rx_queue_id,
631 				    bool eq_completion_only,
632 				    bool cqe_completion)
633 {
634 	struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
635 	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
636 	struct qed_spq_entry *p_ent = NULL;
637 	struct qed_sp_init_data init_data;
638 	u16 abs_rx_q_id = 0;
639 	int rc = -EINVAL;
640 
641 	/* Get SPQ entry */
642 	memset(&init_data, 0, sizeof(init_data));
643 	init_data.cid = p_rx_cid->cid;
644 	init_data.opaque_fid = p_rx_cid->opaque_fid;
645 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
646 
647 	rc = qed_sp_init_request(p_hwfn, &p_ent,
648 				 ETH_RAMROD_RX_QUEUE_STOP,
649 				 PROTOCOLID_ETH, &init_data);
650 	if (rc)
651 		return rc;
652 
653 	p_ramrod = &p_ent->ramrod.rx_queue_stop;
654 
655 	qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
656 	qed_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
657 	p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
658 
659 	/* Cleaning the queue requires the completion to arrive there.
660 	 * In addition, VFs require the answer to arrive as an EQE to the PF.
661 	 */
662 	p_ramrod->complete_cqe_flg =
663 		(!!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) &&
664 		 !eq_completion_only) || cqe_completion;
665 	p_ramrod->complete_event_flg =
666 		!(p_rx_cid->opaque_fid == p_hwfn->hw_info.opaque_fid) ||
667 		eq_completion_only;
668 
669 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
670 	if (rc)
671 		return rc;
672 
673 	return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
674 }
675 
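/* Send the TX_QUEUE_START ramrod: status block, stats counter, PBL address
 * and the physical queue (PQ) selected from the QM are handed to the
 * firmware.
 */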
676 static int
677 qed_sp_eth_txq_start_ramrod(struct qed_hwfn  *p_hwfn,
678 			    u16  opaque_fid,
679 			    u32  cid,
680 			    struct qed_queue_start_common_params *p_params,
681 			    u8  stats_id,
682 			    dma_addr_t pbl_addr,
683 			    u16 pbl_size,
684 			    union qed_qm_pq_params *p_pq_params)
685 {
686 	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
687 	struct qed_spq_entry *p_ent = NULL;
688 	struct qed_sp_init_data init_data;
689 	struct qed_hw_cid_data *p_tx_cid;
690 	u8 abs_vport_id;
691 	int rc = -EINVAL;
692 	u16 pq_id;
693 
694 	/* Store information for the stop */
695 	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
696 	p_tx_cid->cid		= cid;
697 	p_tx_cid->opaque_fid	= opaque_fid;
698 
699 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
700 	if (rc)
701 		return rc;
702 
703 	/* Get SPQ entry */
704 	memset(&init_data, 0, sizeof(init_data));
705 	init_data.cid = cid;
706 	init_data.opaque_fid = opaque_fid;
707 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
708 
709 	rc = qed_sp_init_request(p_hwfn, &p_ent,
710 				 ETH_RAMROD_TX_QUEUE_START,
711 				 PROTOCOLID_ETH, &init_data);
712 	if (rc)
713 		return rc;
714 
715 	p_ramrod		= &p_ent->ramrod.tx_queue_start;
716 	p_ramrod->vport_id	= abs_vport_id;
717 
718 	p_ramrod->sb_id			= cpu_to_le16(p_params->sb);
719 	p_ramrod->sb_index		= p_params->sb_idx;
720 	p_ramrod->stats_counter_id	= stats_id;
721 
722 	p_ramrod->pbl_size		= cpu_to_le16(pbl_size);
723 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
724 
725 	pq_id			= qed_get_qm_pq(p_hwfn,
726 						PROTOCOLID_ETH,
727 						p_pq_params);
728 	p_ramrod->qm_pq_id	= cpu_to_le16(pq_id);
729 
730 	return qed_spq_post(p_hwfn, p_ent, NULL);
731 }
732 
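/* Higher-level Tx queue start: acquire a CID, issue the start ramrod and
 * compute the queue's doorbell address. The CID is released again if the
 * ramrod fails.
 */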
733 static int
734 qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
735 			  u16 opaque_fid,
736 			  struct qed_queue_start_common_params *p_params,
737 			  dma_addr_t pbl_addr,
738 			  u16 pbl_size,
739 			  void __iomem **pp_doorbell)
740 {
741 	struct qed_hw_cid_data *p_tx_cid;
742 	union qed_qm_pq_params pq_params;
743 	u8 abs_stats_id = 0;
744 	int rc;
745 
746 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
747 	if (rc)
748 		return rc;
749 
750 	p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
751 	memset(p_tx_cid, 0, sizeof(*p_tx_cid));
752 	memset(&pq_params, 0, sizeof(pq_params));
753 
754 	/* Allocate a CID for the queue */
755 	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
756 				 &p_tx_cid->cid);
757 	if (rc) {
758 		DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
759 		return rc;
760 	}
761 	p_tx_cid->b_cid_allocated = true;
762 
763 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
764 		   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
765 		   opaque_fid, p_tx_cid->cid,
766 		   p_params->queue_id, p_params->vport_id, p_params->sb);
767 
768 	rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
769 					 opaque_fid,
770 					 p_tx_cid->cid,
771 					 p_params,
772 					 abs_stats_id,
773 					 pbl_addr,
774 					 pbl_size,
775 					 &pq_params);
776 
777 	*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
778 				     qed_db_addr(p_tx_cid->cid, DQ_DEMS_LEGACY);
779 
780 	if (rc)
781 		qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
782 
783 	return rc;
784 }
785 
786 static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
787 				    u16 tx_queue_id)
788 {
789 	struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
790 	struct qed_spq_entry *p_ent = NULL;
791 	struct qed_sp_init_data init_data;
792 	int rc = -EINVAL;
793 
794 	/* Get SPQ entry */
795 	memset(&init_data, 0, sizeof(init_data));
796 	init_data.cid = p_tx_cid->cid;
797 	init_data.opaque_fid = p_tx_cid->opaque_fid;
798 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
799 
800 	rc = qed_sp_init_request(p_hwfn, &p_ent,
801 				 ETH_RAMROD_TX_QUEUE_STOP,
802 				 PROTOCOLID_ETH, &init_data);
803 	if (rc)
804 		return rc;
805 
806 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
807 	if (rc)
808 		return rc;
809 
810 	return qed_sp_release_queue_cid(p_hwfn, p_tx_cid);
811 }
812 
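/* Translate a driver filter opcode into the matching FW action. MOVE and
 * REPLACE are handled explicitly by the caller and therefore map to
 * MAX_ETH_FILTER_ACTION here.
 */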
813 static enum eth_filter_action
814 qed_filter_action(enum qed_filter_opcode opcode)
815 {
816 	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
817 
818 	switch (opcode) {
819 	case QED_FILTER_ADD:
820 		action = ETH_FILTER_ACTION_ADD;
821 		break;
822 	case QED_FILTER_REMOVE:
823 		action = ETH_FILTER_ACTION_REMOVE;
824 		break;
825 	case QED_FILTER_FLUSH:
826 		action = ETH_FILTER_ACTION_REMOVE_ALL;
827 		break;
828 	default:
829 		action = MAX_ETH_FILTER_ACTION;
830 	}
831 
832 	return action;
833 }
834 
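/* Split a MAC address into the three byte-swapped 16-bit words expected by
 * the FW filter commands, e.g. 00:11:22:33:44:55 becomes msb=0x0011,
 * mid=0x2233, lsb=0x4455 when the __le16 words are read as little-endian
 * values.
 */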
835 static void qed_set_fw_mac_addr(__le16 *fw_msb,
836 				__le16 *fw_mid,
837 				__le16 *fw_lsb,
838 				u8 *mac)
839 {
840 	((u8 *)fw_msb)[0] = mac[1];
841 	((u8 *)fw_msb)[1] = mac[0];
842 	((u8 *)fw_mid)[0] = mac[3];
843 	((u8 *)fw_mid)[1] = mac[2];
844 	((u8 *)fw_lsb)[0] = mac[5];
845 	((u8 *)fw_lsb)[1] = mac[4];
846 }
847 
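/* Common preparation of a FILTERS_UPDATE ramrod for unicast commands. MOVE
 * and REPLACE consume two filter-command slots (remove + add); every other
 * opcode uses a single slot. The SPQ entry and ramrod are handed back to the
 * caller, which posts them.
 */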
848 static int
849 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
850 			u16 opaque_fid,
851 			struct qed_filter_ucast *p_filter_cmd,
852 			struct vport_filter_update_ramrod_data **pp_ramrod,
853 			struct qed_spq_entry **pp_ent,
854 			enum spq_mode comp_mode,
855 			struct qed_spq_comp_cb *p_comp_data)
856 {
857 	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
858 	struct vport_filter_update_ramrod_data *p_ramrod;
859 	struct eth_filter_cmd *p_first_filter;
860 	struct eth_filter_cmd *p_second_filter;
861 	struct qed_sp_init_data init_data;
862 	enum eth_filter_action action;
863 	int rc;
864 
865 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
866 			  &vport_to_remove_from);
867 	if (rc)
868 		return rc;
869 
870 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
871 			  &vport_to_add_to);
872 	if (rc)
873 		return rc;
874 
875 	/* Get SPQ entry */
876 	memset(&init_data, 0, sizeof(init_data));
877 	init_data.cid = qed_spq_get_cid(p_hwfn);
878 	init_data.opaque_fid = opaque_fid;
879 	init_data.comp_mode = comp_mode;
880 	init_data.p_comp_data = p_comp_data;
881 
882 	rc = qed_sp_init_request(p_hwfn, pp_ent,
883 				 ETH_RAMROD_FILTERS_UPDATE,
884 				 PROTOCOLID_ETH, &init_data);
885 	if (rc)
886 		return rc;
887 
888 	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
889 	p_ramrod = *pp_ramrod;
890 	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
891 	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
892 
893 	switch (p_filter_cmd->opcode) {
894 	case QED_FILTER_REPLACE:
895 	case QED_FILTER_MOVE:
896 		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
897 	default:
898 		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
899 	}
900 
901 	p_first_filter	= &p_ramrod->filter_cmds[0];
902 	p_second_filter = &p_ramrod->filter_cmds[1];
903 
904 	switch (p_filter_cmd->type) {
905 	case QED_FILTER_MAC:
906 		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
907 	case QED_FILTER_VLAN:
908 		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
909 	case QED_FILTER_MAC_VLAN:
910 		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
911 	case QED_FILTER_INNER_MAC:
912 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
913 	case QED_FILTER_INNER_VLAN:
914 		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
915 	case QED_FILTER_INNER_PAIR:
916 		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
917 	case QED_FILTER_INNER_MAC_VNI_PAIR:
918 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
919 		break;
920 	case QED_FILTER_MAC_VNI_PAIR:
921 		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
922 	case QED_FILTER_VNI:
923 		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
924 	}
925 
926 	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
927 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
928 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
929 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
930 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
931 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
932 		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
933 				    &p_first_filter->mac_mid,
934 				    &p_first_filter->mac_lsb,
935 				    (u8 *)p_filter_cmd->mac);
936 	}
937 
938 	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
939 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
940 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
941 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
942 		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
943 
944 	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
945 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
946 	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
947 		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
948 
949 	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
950 		p_second_filter->type		= p_first_filter->type;
951 		p_second_filter->mac_msb	= p_first_filter->mac_msb;
952 		p_second_filter->mac_mid	= p_first_filter->mac_mid;
953 		p_second_filter->mac_lsb	= p_first_filter->mac_lsb;
954 		p_second_filter->vlan_id	= p_first_filter->vlan_id;
955 		p_second_filter->vni		= p_first_filter->vni;
956 
957 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
958 
959 		p_first_filter->vport_id = vport_to_remove_from;
960 
961 		p_second_filter->action		= ETH_FILTER_ACTION_ADD;
962 		p_second_filter->vport_id	= vport_to_add_to;
963 	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
964 		p_first_filter->vport_id = vport_to_add_to;
965 		memcpy(p_second_filter, p_first_filter,
966 		       sizeof(*p_second_filter));
967 		p_first_filter->action	= ETH_FILTER_ACTION_REMOVE_ALL;
968 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
969 	} else {
970 		action = qed_filter_action(p_filter_cmd->opcode);
971 
972 		if (action == MAX_ETH_FILTER_ACTION) {
973 			DP_NOTICE(p_hwfn,
974 				  "%d is not supported yet\n",
975 				  p_filter_cmd->opcode);
976 			return -EINVAL;
977 		}
978 
979 		p_first_filter->action = action;
980 		p_first_filter->vport_id = (p_filter_cmd->opcode ==
981 					    QED_FILTER_REMOVE) ?
982 					   vport_to_remove_from :
983 					   vport_to_add_to;
984 	}
985 
986 	return 0;
987 }
988 
989 static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
990 				   u16 opaque_fid,
991 				   struct qed_filter_ucast *p_filter_cmd,
992 				   enum spq_mode comp_mode,
993 				   struct qed_spq_comp_cb *p_comp_data)
994 {
995 	struct vport_filter_update_ramrod_data	*p_ramrod	= NULL;
996 	struct qed_spq_entry			*p_ent		= NULL;
997 	struct eth_filter_cmd_header		*p_header;
998 	int					rc;
999 
1000 	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1001 				     &p_ramrod, &p_ent,
1002 				     comp_mode, p_comp_data);
1003 	if (rc != 0) {
1004 		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1005 		return rc;
1006 	}
1007 	p_header = &p_ramrod->filter_cmd_hdr;
1008 	p_header->assert_on_error = p_filter_cmd->assert_on_error;
1009 
1010 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1011 	if (rc != 0) {
1012 		DP_ERR(p_hwfn,
1013 		       "Unicast filter ADD command failed %d\n",
1014 		       rc);
1015 		return rc;
1016 	}
1017 
1018 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1019 		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1020 		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1021 		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1022 		   "REMOVE" :
1023 		   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1024 		    "MOVE" : "REPLACE")),
1025 		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1026 		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1027 		    "VLAN" : "MAC & VLAN"),
1028 		   p_ramrod->filter_cmd_hdr.cmd_cnt,
1029 		   p_filter_cmd->is_rx_filter,
1030 		   p_filter_cmd->is_tx_filter);
1031 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1032 		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1033 		   p_filter_cmd->vport_to_add_to,
1034 		   p_filter_cmd->vport_to_remove_from,
1035 		   p_filter_cmd->mac[0],
1036 		   p_filter_cmd->mac[1],
1037 		   p_filter_cmd->mac[2],
1038 		   p_filter_cmd->mac[3],
1039 		   p_filter_cmd->mac[4],
1040 		   p_filter_cmd->mac[5],
1041 		   p_filter_cmd->vlan);
1042 
1043 	return 0;
1044 }
1045 
1046 /*******************************************************************************
1047  * Description:
1048  *         Calculates CRC-32c over a buffer
1049  *         Note: crc32_length MUST be a multiple of 8
1050  * Return: the computed CRC (the seed is returned unchanged on invalid input)
1051  ******************************************************************************/
1052 static u32 qed_calc_crc32c(u8 *crc32_packet,
1053 			   u32 crc32_length,
1054 			   u32 crc32_seed,
1055 			   u8 complement)
1056 {
1057 	u32 byte = 0;
1058 	u32 bit = 0;
1059 	u8 msb = 0;
1060 	u8 current_byte = 0;
1061 	u32 crc32_result = crc32_seed;
1062 
1063 	if ((!crc32_packet) ||
1064 	    (crc32_length == 0) ||
1065 	    ((crc32_length % 8) != 0))
1066 		return crc32_result;
1067 	for (byte = 0; byte < crc32_length; byte++) {
1068 		current_byte = crc32_packet[byte];
1069 		for (bit = 0; bit < 8; bit++) {
1070 			msb = (u8)(crc32_result >> 31);
1071 			crc32_result = crc32_result << 1;
1072 			if (msb != (0x1 & (current_byte >> bit))) {
1073 				crc32_result = crc32_result ^ CRC32_POLY;
1074 				crc32_result |= 1; /*crc32_result[0] = 1;*/
1075 			}
1076 		}
1077 	}
1078 	return crc32_result;
1079 }
1080 
1081 static inline u32 qed_crc32c_le(u32 seed,
1082 				u8 *mac,
1083 				u32 len)
1084 {
1085 	u32 packet_buf[2] = { 0 };
1086 
1087 	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1088 	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1089 }
1090 
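/* Map a multicast MAC to one of 256 approximate-match bins using the low
 * byte of its CRC-32c.
 */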
1091 static u8 qed_mcast_bin_from_mac(u8 *mac)
1092 {
1093 	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1094 				mac, ETH_ALEN);
1095 
1096 	return crc & 0xff;
1097 }
1098 
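/* Configure the approximate-multicast bins of a vport via a VPORT_UPDATE
 * ramrod. ADD computes and sets the bin of every requested MAC; any other
 * opcode leaves the freshly-cleared vector in place, dropping all bins.
 */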
1099 static int
1100 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1101 			u16 opaque_fid,
1102 			struct qed_filter_mcast *p_filter_cmd,
1103 			enum spq_mode comp_mode,
1104 			struct qed_spq_comp_cb *p_comp_data)
1105 {
1106 	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1107 	struct vport_update_ramrod_data *p_ramrod = NULL;
1108 	struct qed_spq_entry *p_ent = NULL;
1109 	struct qed_sp_init_data init_data;
1110 	u8 abs_vport_id = 0;
1111 	int rc, i;
1112 
1113 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1114 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1115 				  &abs_vport_id);
1116 		if (rc)
1117 			return rc;
1118 	} else {
1119 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1120 				  &abs_vport_id);
1121 		if (rc)
1122 			return rc;
1123 	}
1124 
1125 	/* Get SPQ entry */
1126 	memset(&init_data, 0, sizeof(init_data));
1127 	init_data.cid = qed_spq_get_cid(p_hwfn);
1128 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1129 	init_data.comp_mode = comp_mode;
1130 	init_data.p_comp_data = p_comp_data;
1131 
1132 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1133 				 ETH_RAMROD_VPORT_UPDATE,
1134 				 PROTOCOLID_ETH, &init_data);
1135 	if (rc) {
1136 		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1137 		return rc;
1138 	}
1139 
1140 	p_ramrod = &p_ent->ramrod.vport_update;
1141 	p_ramrod->common.update_approx_mcast_flg = 1;
1142 
1143 	/* explicitly clear out the entire vector */
1144 	memset(&p_ramrod->approx_mcast.bins, 0,
1145 	       sizeof(p_ramrod->approx_mcast.bins));
1146 	memset(bins, 0, sizeof(unsigned long) *
1147 	       ETH_MULTICAST_MAC_BINS_IN_REGS);
1148 	/* The filter ADD op is an explicit set op and removes
1149 	 * any existing filters for the vport.
1150 	 */
1151 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1152 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1153 			u32 bit;
1154 
1155 			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1156 			__set_bit(bit, bins);
1157 		}
1158 
1159 		/* Convert to the correct endianness */
1160 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1161 			u32 *p_bins = (u32 *)bins;
1162 			struct vport_update_ramrod_mcast *approx_mcast;
1163 
1164 			approx_mcast = &p_ramrod->approx_mcast;
1165 			approx_mcast->bins[i] = cpu_to_le32(p_bins[i]);
1166 		}
1167 	}
1168 
1169 	p_ramrod->common.vport_id = abs_vport_id;
1170 
1171 	return qed_spq_post(p_hwfn, p_ent, NULL);
1172 }
1173 
1174 static int
1175 qed_filter_mcast_cmd(struct qed_dev *cdev,
1176 		     struct qed_filter_mcast *p_filter_cmd,
1177 		     enum spq_mode comp_mode,
1178 		     struct qed_spq_comp_cb *p_comp_data)
1179 {
1180 	int rc = 0;
1181 	int i;
1182 
1183 	/* only ADD and REMOVE operations are supported for multi-cast */
1184 	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1185 	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
1186 	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1187 		return -EINVAL;
1188 
1189 	for_each_hwfn(cdev, i) {
1190 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1191 
1192 		u16 opaque_fid;
1193 
1194 		if (rc != 0)
1195 			break;
1196 
1197 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1198 
1199 		rc = qed_sp_eth_filter_mcast(p_hwfn,
1200 					     opaque_fid,
1201 					     p_filter_cmd,
1202 					     comp_mode,
1203 					     p_comp_data);
1204 	}
1205 	return rc;
1206 }
1207 
1208 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1209 				struct qed_filter_ucast *p_filter_cmd,
1210 				enum spq_mode comp_mode,
1211 				struct qed_spq_comp_cb *p_comp_data)
1212 {
1213 	int rc = 0;
1214 	int i;
1215 
1216 	for_each_hwfn(cdev, i) {
1217 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1218 		u16 opaque_fid;
1219 
1220 		if (rc != 0)
1221 			break;
1222 
1223 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1224 
1225 		rc = qed_sp_eth_filter_ucast(p_hwfn,
1226 					     opaque_fid,
1227 					     p_filter_cmd,
1228 					     comp_mode,
1229 					     p_comp_data);
1230 	}
1231 
1232 	return rc;
1233 }
1234 
1235 /* Statistics related code */
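/* Per-queue counters live in the P-, U- and M-storm RAM at fixed per-bin
 * offsets, while the T-storm keeps per-port counters. Each helper below
 * reads one region and accumulates it into struct qed_eth_stats.
 */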
1236 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1237 					   u32 *p_addr,
1238 					   u32 *p_len,
1239 					   u16 statistics_bin)
1240 {
1241 	*p_addr = BAR0_MAP_REG_PSDM_RAM +
1242 		  PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1243 	*p_len = sizeof(struct eth_pstorm_per_queue_stat);
1244 }
1245 
1246 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
1247 				   struct qed_ptt *p_ptt,
1248 				   struct qed_eth_stats *p_stats,
1249 				   u16 statistics_bin)
1250 {
1251 	struct eth_pstorm_per_queue_stat pstats;
1252 	u32 pstats_addr = 0, pstats_len = 0;
1253 
1254 	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1255 				       statistics_bin);
1256 
1257 	memset(&pstats, 0, sizeof(pstats));
1258 	qed_memcpy_from(p_hwfn, p_ptt, &pstats,
1259 			pstats_addr, pstats_len);
1260 
1261 	p_stats->tx_ucast_bytes +=
1262 		HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1263 	p_stats->tx_mcast_bytes +=
1264 		HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1265 	p_stats->tx_bcast_bytes +=
1266 		HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1267 	p_stats->tx_ucast_pkts +=
1268 		HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1269 	p_stats->tx_mcast_pkts +=
1270 		HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1271 	p_stats->tx_bcast_pkts +=
1272 		HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1273 	p_stats->tx_err_drop_pkts +=
1274 		HILO_64_REGPAIR(pstats.error_drop_pkts);
1275 }
1276 
1277 static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
1278 					   u32 *p_addr,
1279 					   u32 *p_len)
1280 {
1281 	*p_addr = BAR0_MAP_REG_TSDM_RAM +
1282 		  TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1283 	*p_len = sizeof(struct tstorm_per_port_stat);
1284 }
1285 
1286 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
1287 				   struct qed_ptt *p_ptt,
1288 				   struct qed_eth_stats *p_stats,
1289 				   u16 statistics_bin)
1290 {
1291 	u32 tstats_addr = 0, tstats_len = 0;
1292 	struct tstorm_per_port_stat tstats;
1293 
1294 	__qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
1295 
1296 	memset(&tstats, 0, sizeof(tstats));
1297 	qed_memcpy_from(p_hwfn, p_ptt, &tstats,
1298 			tstats_addr, tstats_len);
1299 
1300 	p_stats->mftag_filter_discards +=
1301 		HILO_64_REGPAIR(tstats.mftag_filter_discard);
1302 	p_stats->mac_filter_discards +=
1303 		HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1304 }
1305 
1306 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1307 					   u32 *p_addr,
1308 					   u32 *p_len,
1309 					   u16 statistics_bin)
1310 {
1311 	*p_addr = BAR0_MAP_REG_USDM_RAM +
1312 		  USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1313 	*p_len = sizeof(struct eth_ustorm_per_queue_stat);
1314 }
1315 
1316 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
1317 				   struct qed_ptt *p_ptt,
1318 				   struct qed_eth_stats *p_stats,
1319 				   u16 statistics_bin)
1320 {
1321 	struct eth_ustorm_per_queue_stat ustats;
1322 	u32 ustats_addr = 0, ustats_len = 0;
1323 
1324 	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1325 				       statistics_bin);
1326 
1327 	memset(&ustats, 0, sizeof(ustats));
1328 	qed_memcpy_from(p_hwfn, p_ptt, &ustats,
1329 			ustats_addr, ustats_len);
1330 
1331 	p_stats->rx_ucast_bytes +=
1332 		HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1333 	p_stats->rx_mcast_bytes +=
1334 		HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1335 	p_stats->rx_bcast_bytes +=
1336 		HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1337 	p_stats->rx_ucast_pkts +=
1338 		HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1339 	p_stats->rx_mcast_pkts +=
1340 		HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1341 	p_stats->rx_bcast_pkts +=
1342 		HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1343 }
1344 
1345 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1346 					   u32 *p_addr,
1347 					   u32 *p_len,
1348 					   u16 statistics_bin)
1349 {
1350 	*p_addr = BAR0_MAP_REG_MSDM_RAM +
1351 		  MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1352 	*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1353 }
1354 
1355 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1356 				   struct qed_ptt *p_ptt,
1357 				   struct qed_eth_stats *p_stats,
1358 				   u16 statistics_bin)
1359 {
1360 	struct eth_mstorm_per_queue_stat mstats;
1361 	u32 mstats_addr = 0, mstats_len = 0;
1362 
1363 	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1364 				       statistics_bin);
1365 
1366 	memset(&mstats, 0, sizeof(mstats));
1367 	qed_memcpy_from(p_hwfn, p_ptt, &mstats,
1368 			mstats_addr, mstats_len);
1369 
1370 	p_stats->no_buff_discards +=
1371 		HILO_64_REGPAIR(mstats.no_buff_discard);
1372 	p_stats->packet_too_big_discard +=
1373 		HILO_64_REGPAIR(mstats.packet_too_big_discard);
1374 	p_stats->ttl0_discard +=
1375 		HILO_64_REGPAIR(mstats.ttl0_discard);
1376 	p_stats->tpa_coalesced_pkts +=
1377 		HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1378 	p_stats->tpa_coalesced_events +=
1379 		HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1380 	p_stats->tpa_aborts_num +=
1381 		HILO_64_REGPAIR(mstats.tpa_aborts_num);
1382 	p_stats->tpa_coalesced_bytes +=
1383 		HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1384 }
1385 
1386 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1387 				       struct qed_ptt *p_ptt,
1388 				       struct qed_eth_stats *p_stats)
1389 {
1390 	struct port_stats port_stats;
1391 	int j;
1392 
1393 	memset(&port_stats, 0, sizeof(port_stats));
1394 
1395 	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1396 			p_hwfn->mcp_info->port_addr +
1397 			offsetof(struct public_port, stats),
1398 			sizeof(port_stats));
1399 
1400 	p_stats->rx_64_byte_packets		+= port_stats.pmm.r64;
1401 	p_stats->rx_127_byte_packets		+= port_stats.pmm.r127;
1402 	p_stats->rx_255_byte_packets		+= port_stats.pmm.r255;
1403 	p_stats->rx_511_byte_packets		+= port_stats.pmm.r511;
1404 	p_stats->rx_1023_byte_packets		+= port_stats.pmm.r1023;
1405 	p_stats->rx_1518_byte_packets		+= port_stats.pmm.r1518;
1406 	p_stats->rx_1522_byte_packets		+= port_stats.pmm.r1522;
1407 	p_stats->rx_2047_byte_packets		+= port_stats.pmm.r2047;
1408 	p_stats->rx_4095_byte_packets		+= port_stats.pmm.r4095;
1409 	p_stats->rx_9216_byte_packets		+= port_stats.pmm.r9216;
1410 	p_stats->rx_16383_byte_packets		+= port_stats.pmm.r16383;
1411 	p_stats->rx_crc_errors			+= port_stats.pmm.rfcs;
1412 	p_stats->rx_mac_crtl_frames		+= port_stats.pmm.rxcf;
1413 	p_stats->rx_pause_frames		+= port_stats.pmm.rxpf;
1414 	p_stats->rx_pfc_frames			+= port_stats.pmm.rxpp;
1415 	p_stats->rx_align_errors		+= port_stats.pmm.raln;
1416 	p_stats->rx_carrier_errors		+= port_stats.pmm.rfcr;
1417 	p_stats->rx_oversize_packets		+= port_stats.pmm.rovr;
1418 	p_stats->rx_jabbers			+= port_stats.pmm.rjbr;
1419 	p_stats->rx_undersize_packets		+= port_stats.pmm.rund;
1420 	p_stats->rx_fragments			+= port_stats.pmm.rfrg;
1421 	p_stats->tx_64_byte_packets		+= port_stats.pmm.t64;
1422 	p_stats->tx_65_to_127_byte_packets	+= port_stats.pmm.t127;
1423 	p_stats->tx_128_to_255_byte_packets	+= port_stats.pmm.t255;
1424 	p_stats->tx_256_to_511_byte_packets	+= port_stats.pmm.t511;
1425 	p_stats->tx_512_to_1023_byte_packets	+= port_stats.pmm.t1023;
1426 	p_stats->tx_1024_to_1518_byte_packets	+= port_stats.pmm.t1518;
1427 	p_stats->tx_1519_to_2047_byte_packets	+= port_stats.pmm.t2047;
1428 	p_stats->tx_2048_to_4095_byte_packets	+= port_stats.pmm.t4095;
1429 	p_stats->tx_4096_to_9216_byte_packets	+= port_stats.pmm.t9216;
1430 	p_stats->tx_9217_to_16383_byte_packets	+= port_stats.pmm.t16383;
1431 	p_stats->tx_pause_frames		+= port_stats.pmm.txpf;
1432 	p_stats->tx_pfc_frames			+= port_stats.pmm.txpp;
1433 	p_stats->tx_lpi_entry_count		+= port_stats.pmm.tlpiec;
1434 	p_stats->tx_total_collisions		+= port_stats.pmm.tncl;
1435 	p_stats->rx_mac_bytes			+= port_stats.pmm.rbyte;
1436 	p_stats->rx_mac_uc_packets		+= port_stats.pmm.rxuca;
1437 	p_stats->rx_mac_mc_packets		+= port_stats.pmm.rxmca;
1438 	p_stats->rx_mac_bc_packets		+= port_stats.pmm.rxbca;
1439 	p_stats->rx_mac_frames_ok		+= port_stats.pmm.rxpok;
1440 	p_stats->tx_mac_bytes			+= port_stats.pmm.tbyte;
1441 	p_stats->tx_mac_uc_packets		+= port_stats.pmm.txuca;
1442 	p_stats->tx_mac_mc_packets		+= port_stats.pmm.txmca;
1443 	p_stats->tx_mac_bc_packets		+= port_stats.pmm.txbca;
1444 	p_stats->tx_mac_ctrl_frames		+= port_stats.pmm.txcf;
1445 	for (j = 0; j < 8; j++) {
1446 		p_stats->brb_truncates	+= port_stats.brb.brb_truncate[j];
1447 		p_stats->brb_discards	+= port_stats.brb.brb_discard[j];
1448 	}
1449 }
1450 
1451 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1452 				  struct qed_ptt *p_ptt,
1453 				  struct qed_eth_stats *stats,
1454 				  u16 statistics_bin)
1455 {
1456 	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1457 	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1458 	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1459 	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1460 
1461 	if (p_hwfn->mcp_info)
1462 		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1463 }
1464 
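/* Zero @stats and accumulate the per-hwfn vport and port statistics into it;
 * on CMT devices the result is the sum over both engines.
 */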
1465 static void _qed_get_vport_stats(struct qed_dev *cdev,
1466 				 struct qed_eth_stats *stats)
1467 {
1468 	u8	fw_vport = 0;
1469 	int	i;
1470 
1471 	memset(stats, 0, sizeof(*stats));
1472 
1473 	for_each_hwfn(cdev, i) {
1474 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1475 		struct qed_ptt *p_ptt;
1476 
1477 		/* The main vport is always at relative index 0 */
1478 		if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1479 			DP_ERR(p_hwfn, "No vport available!\n");
1480 			continue;
1481 		}
1482 
1483 		p_ptt = qed_ptt_acquire(p_hwfn);
1484 		if (!p_ptt) {
1485 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1486 			continue;
1487 		}
1488 
1489 		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
1490 
1491 		qed_ptt_release(p_hwfn, p_ptt);
1492 	}
1493 }
1494 
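/* Exported statistics getter: read the current counters and subtract the
 * baseline captured at the last qed_reset_vport_stats() call, if one exists.
 */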
1495 void qed_get_vport_stats(struct qed_dev *cdev,
1496 			 struct qed_eth_stats *stats)
1497 {
1498 	u32 i;
1499 
1500 	if (!cdev) {
1501 		memset(stats, 0, sizeof(*stats));
1502 		return;
1503 	}
1504 
1505 	_qed_get_vport_stats(cdev, stats);
1506 
1507 	if (!cdev->reset_stats)
1508 		return;
1509 
1510 	/* Subtract the statistics baseline captured at the last reset */
1511 	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1512 		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1513 }
1514 
1515 /* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
1516 void qed_reset_vport_stats(struct qed_dev *cdev)
1517 {
1518 	int i;
1519 
1520 	for_each_hwfn(cdev, i) {
1521 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1522 		struct eth_mstorm_per_queue_stat mstats;
1523 		struct eth_ustorm_per_queue_stat ustats;
1524 		struct eth_pstorm_per_queue_stat pstats;
1525 		struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
1526 		u32 addr = 0, len = 0;
1527 
1528 		if (!p_ptt) {
1529 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1530 			continue;
1531 		}
1532 
1533 		memset(&mstats, 0, sizeof(mstats));
1534 		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1535 		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1536 
1537 		memset(&ustats, 0, sizeof(ustats));
1538 		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1539 		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1540 
1541 		memset(&pstats, 0, sizeof(pstats));
1542 		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1543 		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1544 
1545 		qed_ptt_release(p_hwfn, p_ptt);
1546 	}
1547 
1548 	/* PORT statistics are not necessarily reset, so we need to
1549 	 * read and create a baseline for future statistics.
1550 	 */
1551 	if (!cdev->reset_stats)
1552 		DP_INFO(cdev, "Reset stats not allocated\n");
1553 	else
1554 		_qed_get_vport_stats(cdev, cdev->reset_stats);
1555 }
1556 
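/* Report L2 capabilities to the protocol driver. With MSI-X, the number of
 * queues is the L2 feature count summed over all hwfns, capped by the number
 * of fastpath MSI-X vectors; otherwise a single queue per hwfn is exposed.
 */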
1557 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
1558 				 struct qed_dev_eth_info *info)
1559 {
1560 	int i;
1561 
1562 	memset(info, 0, sizeof(*info));
1563 
1564 	info->num_tc = 1;
1565 
1566 	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
1567 		for_each_hwfn(cdev, i)
1568 			info->num_queues += FEAT_NUM(&cdev->hwfns[i],
1569 						     QED_PF_L2_QUE);
1570 		if (cdev->int_params.fp_msix_cnt)
1571 			info->num_queues = min_t(u8, info->num_queues,
1572 						 cdev->int_params.fp_msix_cnt);
1573 	} else {
1574 		info->num_queues = cdev->num_hwfns;
1575 	}
1576 
1577 	info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
1578 	ether_addr_copy(info->port_mac,
1579 			cdev->hwfns[0].hw_info.hw_mac_addr);
1580 
1581 	qed_fill_dev_info(cdev, &info->common);
1582 
1583 	return 0;
1584 }
1585 
1586 static void qed_register_eth_ops(struct qed_dev *cdev,
1587 				 struct qed_eth_cb_ops *ops,
1588 				 void *cookie)
1589 {
1590 	cdev->protocol_ops.eth	= ops;
1591 	cdev->ops_cookie	= cookie;
1592 }
1593 
1594 static int qed_start_vport(struct qed_dev *cdev,
1595 			   u8 vport_id,
1596 			   u16 mtu,
1597 			   u8 drop_ttl0_flg,
1598 			   u8 inner_vlan_removal_en_flg)
1599 {
1600 	int rc, i;
1601 
1602 	for_each_hwfn(cdev, i) {
1603 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1604 
1605 		rc = qed_sp_vport_start(p_hwfn,
1606 					p_hwfn->hw_info.concrete_fid,
1607 					p_hwfn->hw_info.opaque_fid,
1608 					vport_id,
1609 					mtu,
1610 					drop_ttl0_flg,
1611 					inner_vlan_removal_en_flg);
1612 
1613 		if (rc) {
1614 			DP_ERR(cdev, "Failed to start VPORT\n");
1615 			return rc;
1616 		}
1617 
1618 		qed_hw_start_fastpath(p_hwfn);
1619 
1620 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1621 			   "Started V-PORT %d with MTU %d\n",
1622 			   vport_id, mtu);
1623 	}
1624 
1625 	qed_reset_vport_stats(cdev);
1626 
1627 	return 0;
1628 }
1629 
1630 static int qed_stop_vport(struct qed_dev *cdev,
1631 			  u8 vport_id)
1632 {
1633 	int rc, i;
1634 
1635 	for_each_hwfn(cdev, i) {
1636 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1637 
1638 		rc = qed_sp_vport_stop(p_hwfn,
1639 				       p_hwfn->hw_info.opaque_fid,
1640 				       vport_id);
1641 
1642 		if (rc) {
1643 			DP_ERR(cdev, "Failed to stop VPORT\n");
1644 			return rc;
1645 		}
1646 	}
1647 	return 0;
1648 }
1649 
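/* Apply a protocol-driver vport update. On CMT (multi-hwfn) devices the RSS
 * indirection table is re-mapped per engine, or RSS is disabled entirely when
 * only a single queue exists per hwfn.
 */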
1650 static int qed_update_vport(struct qed_dev *cdev,
1651 			    struct qed_update_vport_params *params)
1652 {
1653 	struct qed_sp_vport_update_params sp_params;
1654 	struct qed_rss_params sp_rss_params;
1655 	int rc, i;
1656 
1657 	if (!cdev)
1658 		return -ENODEV;
1659 
1660 	memset(&sp_params, 0, sizeof(sp_params));
1661 	memset(&sp_rss_params, 0, sizeof(sp_rss_params));
1662 
1663 	/* Translate protocol params into sp params */
1664 	sp_params.vport_id = params->vport_id;
1665 	sp_params.update_vport_active_rx_flg =
1666 		params->update_vport_active_flg;
1667 	sp_params.update_vport_active_tx_flg =
1668 		params->update_vport_active_flg;
1669 	sp_params.vport_active_rx_flg = params->vport_active_flg;
1670 	sp_params.vport_active_tx_flg = params->vport_active_flg;
1671 	sp_params.accept_any_vlan = params->accept_any_vlan;
1672 	sp_params.update_accept_any_vlan_flg =
1673 		params->update_accept_any_vlan_flg;
1674 
1675 	/* RSS is a bit tricky, since the upper layer isn't aware of hwfns.
1676 	 * For CMT we need to re-map the RSS values per engine.
1677 	 */
1678 	if (cdev->num_hwfns > 1 && params->update_rss_flg) {
1679 		struct qed_update_vport_rss_params *rss =
1680 			&params->rss_params;
1681 		int k, max = 0;
1682 
1683 		/* Find the largest entry, since RSS may need to be
1684 		 * disabled [in case there is only 1 queue per hwfn].
1685 		 */
1686 		for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
1687 			max = (max > rss->rss_ind_table[k]) ?
1688 				max : rss->rss_ind_table[k];
1689 
1690 		/* Either fix RSS values or disable RSS */
1691 		if (cdev->num_hwfns < max + 1) {
1692 			int divisor = (max + cdev->num_hwfns - 1) /
1693 				cdev->num_hwfns;
1694 
1695 			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1696 				   "CMT - fixing RSS values (modulo %02x)\n",
1697 				   divisor);
1698 
1699 			for (k = 0; k < QED_RSS_IND_TABLE_SIZE; k++)
1700 				rss->rss_ind_table[k] =
1701 					rss->rss_ind_table[k] % divisor;
1702 		} else {
1703 			DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1704 				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
1705 			params->update_rss_flg = 0;
1706 		}
1707 	}
1708 
1709 	/* Now build the actual RSS configuration */
1710 	if (params->update_rss_flg) {
1711 		sp_rss_params.update_rss_config = 1;
1712 		sp_rss_params.rss_enable = 1;
1713 		sp_rss_params.update_rss_capabilities = 1;
1714 		sp_rss_params.update_rss_ind_table = 1;
1715 		sp_rss_params.update_rss_key = 1;
1716 		sp_rss_params.rss_caps = QED_RSS_IPV4 |
1717 					 QED_RSS_IPV6 |
1718 					 QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
1719 		sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
1720 		memcpy(sp_rss_params.rss_ind_table,
1721 		       params->rss_params.rss_ind_table,
1722 		       QED_RSS_IND_TABLE_SIZE * sizeof(u16));
1723 		memcpy(sp_rss_params.rss_key, params->rss_params.rss_key,
1724 		       QED_RSS_KEY_SIZE * sizeof(u32));
1725 	}
1726 	sp_params.rss_params = &sp_rss_params;
1727 
1728 	for_each_hwfn(cdev, i) {
1729 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1730 
1731 		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
1732 		rc = qed_sp_vport_update(p_hwfn, &sp_params,
1733 					 QED_SPQ_MODE_EBLOCK,
1734 					 NULL);
1735 		if (rc) {
1736 			DP_ERR(cdev, "Failed to update VPORT\n");
1737 			return rc;
1738 		}
1739 
1740 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1741 			   "Updated V-PORT %d: active_flag %d [update %d]\n",
1742 			   params->vport_id, params->vport_active_flg,
1743 			   params->update_vport_active_flg);
1744 	}
1745 
1746 	return 0;
1747 }
1748 
1749 static int qed_start_rxq(struct qed_dev *cdev,
1750 			 struct qed_queue_start_common_params *params,
1751 			 u16 bd_max_bytes,
1752 			 dma_addr_t bd_chain_phys_addr,
1753 			 dma_addr_t cqe_pbl_addr,
1754 			 u16 cqe_pbl_size,
1755 			 void __iomem **pp_prod)
1756 {
1757 	int rc, hwfn_index;
1758 	struct qed_hwfn *p_hwfn;
1759 
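	/* In CMT, queues are spread round-robin between the hwfns, so the
	 * rss_id determines which engine owns this queue.
	 */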
1760 	hwfn_index = params->rss_id % cdev->num_hwfns;
1761 	p_hwfn = &cdev->hwfns[hwfn_index];
1762 
	/* In CMT (100g) translate the queue ID to the engine-local one */
1764 	params->queue_id /= cdev->num_hwfns;
1765 
1766 	rc = qed_sp_eth_rx_queue_start(p_hwfn,
1767 				       p_hwfn->hw_info.opaque_fid,
1768 				       params,
1769 				       bd_max_bytes,
1770 				       bd_chain_phys_addr,
1771 				       cqe_pbl_addr,
1772 				       cqe_pbl_size,
1773 				       pp_prod);
1774 
1775 	if (rc) {
1776 		DP_ERR(cdev, "Failed to start RXQ#%d\n", params->queue_id);
1777 		return rc;
1778 	}
1779 
1780 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1781 		   "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1782 		   params->queue_id, params->rss_id, params->vport_id,
1783 		   params->sb);
1784 
1785 	return 0;
1786 }
1787 
1788 static int qed_stop_rxq(struct qed_dev *cdev,
1789 			struct qed_stop_rxq_params *params)
1790 {
1791 	int rc, hwfn_index;
1792 	struct qed_hwfn *p_hwfn;
1793 
1794 	hwfn_index	= params->rss_id % cdev->num_hwfns;
1795 	p_hwfn		= &cdev->hwfns[hwfn_index];
1796 
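	/* Translate the global Rx queue ID into the engine-local ID */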
1797 	rc = qed_sp_eth_rx_queue_stop(p_hwfn,
1798 				      params->rx_queue_id / cdev->num_hwfns,
1799 				      params->eq_completion_only,
1800 				      false);
1801 	if (rc) {
1802 		DP_ERR(cdev, "Failed to stop RXQ#%d\n", params->rx_queue_id);
1803 		return rc;
1804 	}
1805 
1806 	return 0;
1807 }
1808 
1809 static int qed_start_txq(struct qed_dev *cdev,
1810 			 struct qed_queue_start_common_params *p_params,
1811 			 dma_addr_t pbl_addr,
1812 			 u16 pbl_size,
1813 			 void __iomem **pp_doorbell)
1814 {
1815 	struct qed_hwfn *p_hwfn;
1816 	int rc, hwfn_index;
1817 
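	/* Select the engine that owns this queue, as on the Rx side */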
1818 	hwfn_index	= p_params->rss_id % cdev->num_hwfns;
1819 	p_hwfn		= &cdev->hwfns[hwfn_index];
1820 
	/* In CMT (100g) translate the queue ID to the engine-local one */
1822 	p_params->queue_id /= cdev->num_hwfns;
1823 
1824 	rc = qed_sp_eth_tx_queue_start(p_hwfn,
1825 				       p_hwfn->hw_info.opaque_fid,
1826 				       p_params,
1827 				       pbl_addr,
1828 				       pbl_size,
1829 				       pp_doorbell);
1830 
1831 	if (rc) {
1832 		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
1833 		return rc;
1834 	}
1835 
1836 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
1837 		   "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n",
1838 		   p_params->queue_id, p_params->rss_id, p_params->vport_id,
1839 		   p_params->sb);
1840 
1841 	return 0;
1842 }
1843 
1844 #define QED_HW_STOP_RETRY_LIMIT (10)
1845 static int qed_fastpath_stop(struct qed_dev *cdev)
1846 {
1847 	qed_hw_stop_fastpath(cdev);
1848 
1849 	return 0;
1850 }
1851 
1852 static int qed_stop_txq(struct qed_dev *cdev,
1853 			struct qed_stop_txq_params *params)
1854 {
1855 	struct qed_hwfn *p_hwfn;
1856 	int rc, hwfn_index;
1857 
1858 	hwfn_index	= params->rss_id % cdev->num_hwfns;
1859 	p_hwfn		= &cdev->hwfns[hwfn_index];
1860 
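	/* Translate the global Tx queue ID into the engine-local ID */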
1861 	rc = qed_sp_eth_tx_queue_stop(p_hwfn,
1862 				      params->tx_queue_id / cdev->num_hwfns);
1863 	if (rc) {
1864 		DP_ERR(cdev, "Failed to stop TXQ#%d\n", params->tx_queue_id);
1865 		return rc;
1866 	}
1867 
1868 	return 0;
1869 }
1870 
1871 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
1872 					enum qed_filter_rx_mode_type type)
1873 {
1874 	struct qed_filter_accept_flags accept_flags;
1875 
1876 	memset(&accept_flags, 0, sizeof(accept_flags));
1877 
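	/* Matched unicast/multicast and broadcast are always accepted;
	 * the promiscuous modes additionally accept unmatched traffic
	 * on the Rx side.
	 */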
1878 	accept_flags.update_rx_mode_config	= 1;
1879 	accept_flags.update_tx_mode_config	= 1;
1880 	accept_flags.rx_accept_filter		= QED_ACCEPT_UCAST_MATCHED |
1881 						  QED_ACCEPT_MCAST_MATCHED |
1882 						  QED_ACCEPT_BCAST;
1883 	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
1884 					QED_ACCEPT_MCAST_MATCHED |
1885 					QED_ACCEPT_BCAST;
1886 
1887 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC)
1888 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
1889 						 QED_ACCEPT_MCAST_UNMATCHED;
1890 	else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC)
1891 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
1892 
1893 	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
1894 				     QED_SPQ_MODE_CB, NULL);
1895 }
1896 
1897 static int qed_configure_filter_ucast(struct qed_dev *cdev,
1898 				      struct qed_filter_ucast_params *params)
1899 {
1900 	struct qed_filter_ucast ucast;
1901 
1902 	if (!params->vlan_valid && !params->mac_valid) {
		DP_NOTICE(cdev,
			  "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
1906 		return -EINVAL;
1907 	}
1908 
1909 	memset(&ucast, 0, sizeof(ucast));
1910 	switch (params->type) {
1911 	case QED_FILTER_XCAST_TYPE_ADD:
1912 		ucast.opcode = QED_FILTER_ADD;
1913 		break;
1914 	case QED_FILTER_XCAST_TYPE_DEL:
1915 		ucast.opcode = QED_FILTER_REMOVE;
1916 		break;
1917 	case QED_FILTER_XCAST_TYPE_REPLACE:
1918 		ucast.opcode = QED_FILTER_REPLACE;
1919 		break;
	default:
		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
			  params->type);
		return -EINVAL;
1923 	}
1924 
1925 	if (params->vlan_valid && params->mac_valid) {
1926 		ucast.type = QED_FILTER_MAC_VLAN;
1927 		ether_addr_copy(ucast.mac, params->mac);
1928 		ucast.vlan = params->vlan;
1929 	} else if (params->mac_valid) {
1930 		ucast.type = QED_FILTER_MAC;
1931 		ether_addr_copy(ucast.mac, params->mac);
1932 	} else {
1933 		ucast.type = QED_FILTER_VLAN;
1934 		ucast.vlan = params->vlan;
1935 	}
1936 
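	/* Apply the filter to both Rx and Tx classification */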
1937 	ucast.is_rx_filter = true;
1938 	ucast.is_tx_filter = true;
1939 
1940 	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
1941 }
1942 
1943 static int qed_configure_filter_mcast(struct qed_dev *cdev,
1944 				      struct qed_filter_mcast_params *params)
1945 {
1946 	struct qed_filter_mcast mcast;
1947 	int i;
1948 
1949 	memset(&mcast, 0, sizeof(mcast));
1950 	switch (params->type) {
1951 	case QED_FILTER_XCAST_TYPE_ADD:
1952 		mcast.opcode = QED_FILTER_ADD;
1953 		break;
1954 	case QED_FILTER_XCAST_TYPE_DEL:
1955 		mcast.opcode = QED_FILTER_REMOVE;
1956 		break;
	default:
		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
			  params->type);
		return -EINVAL;
1960 	}
1961 
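	/* The caller is expected to pass no more than QED_MAX_MC_ADDRS
	 * addresses - the size of the mac[] array.
	 */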
1962 	mcast.num_mc_addrs = params->num;
1963 	for (i = 0; i < mcast.num_mc_addrs; i++)
1964 		ether_addr_copy(mcast.mac[i], params->mac[i]);
1965 
1966 	return qed_filter_mcast_cmd(cdev, &mcast,
1967 				    QED_SPQ_MODE_CB, NULL);
1968 }
1969 
1970 static int qed_configure_filter(struct qed_dev *cdev,
1971 				struct qed_filter_params *params)
1972 {
1973 	enum qed_filter_rx_mode_type accept_flags;
1974 
1975 	switch (params->type) {
1976 	case QED_FILTER_TYPE_UCAST:
1977 		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
1978 	case QED_FILTER_TYPE_MCAST:
1979 		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
1980 	case QED_FILTER_TYPE_RX_MODE:
1981 		accept_flags = params->filter.accept_flags;
1982 		return qed_configure_filter_rx_mode(cdev, accept_flags);
1983 	default:
1984 		DP_NOTICE(cdev, "Unknown filter type %d\n",
1985 			  (int)params->type);
1986 		return -EINVAL;
1987 	}
1988 }
1989 
1990 static int qed_fp_cqe_completion(struct qed_dev *dev,
1991 				 u8 rss_id,
1992 				 struct eth_slow_path_rx_cqe *cqe)
1993 {
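	/* Forward the slowpath CQE to the hwfn that owns this queue */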
1994 	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
1995 				      cqe);
1996 }
1997 
1998 static const struct qed_eth_ops qed_eth_ops_pass = {
1999 	.common = &qed_common_ops_pass,
2000 	.fill_dev_info = &qed_fill_eth_dev_info,
2001 	.register_ops = &qed_register_eth_ops,
2002 	.vport_start = &qed_start_vport,
2003 	.vport_stop = &qed_stop_vport,
2004 	.vport_update = &qed_update_vport,
2005 	.q_rx_start = &qed_start_rxq,
2006 	.q_rx_stop = &qed_stop_rxq,
2007 	.q_tx_start = &qed_start_txq,
2008 	.q_tx_stop = &qed_stop_txq,
2009 	.filter_config = &qed_configure_filter,
2010 	.fastpath_stop = &qed_fastpath_stop,
2011 	.eth_cqe_completion = &qed_fp_cqe_completion,
2012 	.get_vport_stats = &qed_get_vport_stats,
2013 };
2014 
2015 const struct qed_eth_ops *qed_get_eth_ops(u32 version)
2016 {
2017 	if (version != QED_ETH_INTERFACE_VERSION) {
		pr_notice("Cannot supply eth ops [%08x != %08x]\n",
2019 			  version, QED_ETH_INTERFACE_VERSION);
2020 		return NULL;
2021 	}
2022 
2023 	return &qed_eth_ops_pass;
2024 }
2025 EXPORT_SYMBOL(qed_get_eth_ops);
2026 
2027 void qed_put_eth_ops(void)
2028 {
2029 	/* TODO - reference count for module? */
2030 }
2031 EXPORT_SYMBOL(qed_put_eth_ops);
2032