1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <asm/param.h>
36 #include <linux/delay.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/etherdevice.h>
39 #include <linux/interrupt.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/slab.h>
44 #include <linux/stddef.h>
45 #include <linux/string.h>
46 #include <linux/workqueue.h>
47 #include <linux/bitops.h>
48 #include <linux/bug.h>
49 #include <linux/vmalloc.h>
50 #include "qed.h"
51 #include <linux/qed/qed_chain.h>
52 #include "qed_cxt.h"
53 #include "qed_dev_api.h"
54 #include <linux/qed/qed_eth_if.h>
55 #include "qed_hsi.h"
56 #include "qed_hw.h"
57 #include "qed_int.h"
58 #include "qed_l2.h"
59 #include "qed_mcp.h"
60 #include "qed_reg_addr.h"
61 #include "qed_sp.h"
62 #include "qed_sriov.h"
63 
64 
65 #define QED_MAX_SGES_NUM 16
66 #define CRC32_POLY 0x1edc6f41
67 
68 struct qed_l2_info {
69 	u32 queues;
70 	unsigned long **pp_qid_usage;
71 
72 	/* The lock is meant to synchronize access to the qid usage */
73 	struct mutex lock;
74 };
75 
76 int qed_l2_alloc(struct qed_hwfn *p_hwfn)
77 {
78 	struct qed_l2_info *p_l2_info;
79 	unsigned long **pp_qids;
80 	u32 i;
81 
82 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
83 		return 0;
84 
85 	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
86 	if (!p_l2_info)
87 		return -ENOMEM;
88 	p_hwfn->p_l2_info = p_l2_info;
89 
90 	if (IS_PF(p_hwfn->cdev)) {
91 		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
92 	} else {
93 		u8 rx = 0, tx = 0;
94 
95 		qed_vf_get_num_rxqs(p_hwfn, &rx);
96 		qed_vf_get_num_txqs(p_hwfn, &tx);
97 
98 		p_l2_info->queues = max_t(u8, rx, tx);
99 	}
100 
101 	pp_qids = kzalloc(sizeof(unsigned long *) * p_l2_info->queues,
102 			  GFP_KERNEL);
103 	if (!pp_qids)
104 		return -ENOMEM;
105 	p_l2_info->pp_qid_usage = pp_qids;
106 
107 	for (i = 0; i < p_l2_info->queues; i++) {
108 		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
109 		if (!pp_qids[i])
110 			return -ENOMEM;
111 	}
112 
113 	return 0;
114 }
115 
116 void qed_l2_setup(struct qed_hwfn *p_hwfn)
117 {
118 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
119 		return;
120 
121 	mutex_init(&p_hwfn->p_l2_info->lock);
122 }
123 
124 void qed_l2_free(struct qed_hwfn *p_hwfn)
125 {
126 	u32 i;
127 
128 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
129 		return;
130 
131 	if (!p_hwfn->p_l2_info)
132 		return;
133 
134 	if (!p_hwfn->p_l2_info->pp_qid_usage)
135 		goto out_l2_info;
136 
137 	/* Free until hit first uninitialized entry */
138 	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
139 		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
140 			break;
141 		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
142 	}
143 
144 	kfree(p_hwfn->p_l2_info->pp_qid_usage);
145 
146 out_l2_info:
147 	kfree(p_hwfn->p_l2_info);
148 	p_hwfn->p_l2_info = NULL;
149 }
150 
151 static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
152 					struct qed_queue_cid *p_cid)
153 {
154 	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
155 	u16 queue_id = p_cid->rel.queue_id;
156 	bool b_rc = true;
157 	u8 first;
158 
159 	mutex_lock(&p_l2_info->lock);
160 
161 	if (queue_id >= p_l2_info->queues) {
162 		DP_NOTICE(p_hwfn,
163 			  "Requested to increase usage for qzone %04x out of %08x\n",
164 			  queue_id, p_l2_info->queues);
165 		b_rc = false;
166 		goto out;
167 	}
168 
169 	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
170 					MAX_QUEUES_PER_QZONE);
171 	if (first >= MAX_QUEUES_PER_QZONE) {
172 		b_rc = false;
173 		goto out;
174 	}
175 
176 	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
177 	p_cid->qid_usage_idx = first;
178 
179 out:
180 	mutex_unlock(&p_l2_info->lock);
181 	return b_rc;
182 }
183 
184 static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
185 					struct qed_queue_cid *p_cid)
186 {
187 	mutex_lock(&p_hwfn->p_l2_info->lock);
188 
189 	clear_bit(p_cid->qid_usage_idx,
190 		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
191 
192 	mutex_unlock(&p_hwfn->p_l2_info->lock);
193 }
194 
195 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
196 			       struct qed_queue_cid *p_cid)
197 {
198 	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
199 
200 	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
201 		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
202 
203 	/* For PF's VFs we maintain the index inside queue-zone in IOV */
204 	if (p_cid->vfid == QED_QUEUE_CID_SELF)
205 		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
206 
207 	vfree(p_cid);
208 }
209 
210 /* The internal is only meant to be directly called by PFs initializeing CIDs
211  * for their VFs.
212  */
213 static struct qed_queue_cid *
214 _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
215 		      u16 opaque_fid,
216 		      u32 cid,
217 		      struct qed_queue_start_common_params *p_params,
218 		      bool b_is_rx,
219 		      struct qed_queue_cid_vf_params *p_vf_params)
220 {
221 	struct qed_queue_cid *p_cid;
222 	int rc;
223 
224 	p_cid = vzalloc(sizeof(*p_cid));
225 	if (!p_cid)
226 		return NULL;
227 
228 	p_cid->opaque_fid = opaque_fid;
229 	p_cid->cid = cid;
230 	p_cid->p_owner = p_hwfn;
231 
232 	/* Fill in parameters */
233 	p_cid->rel.vport_id = p_params->vport_id;
234 	p_cid->rel.queue_id = p_params->queue_id;
235 	p_cid->rel.stats_id = p_params->stats_id;
236 	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
237 	p_cid->b_is_rx = b_is_rx;
238 	p_cid->sb_idx = p_params->sb_idx;
239 
240 	/* Fill-in bits related to VFs' queues if information was provided */
241 	if (p_vf_params) {
242 		p_cid->vfid = p_vf_params->vfid;
243 		p_cid->vf_qid = p_vf_params->vf_qid;
244 		p_cid->vf_legacy = p_vf_params->vf_legacy;
245 	} else {
246 		p_cid->vfid = QED_QUEUE_CID_SELF;
247 	}
248 
249 	/* Don't try calculating the absolute indices for VFs */
250 	if (IS_VF(p_hwfn->cdev)) {
251 		p_cid->abs = p_cid->rel;
252 		goto out;
253 	}
254 
255 	/* Calculate the engine-absolute indices of the resources.
256 	 * This would guarantee they're valid later on.
257 	 * In some cases [SBs] we already have the right values.
258 	 */
259 	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
260 	if (rc)
261 		goto fail;
262 
263 	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
264 	if (rc)
265 		goto fail;
266 
267 	/* In case of a PF configuring its VF's queues, the stats-id is already
268 	 * absolute [since there's a single index that's suitable per-VF].
269 	 */
270 	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
271 		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
272 				  &p_cid->abs.stats_id);
273 		if (rc)
274 			goto fail;
275 	} else {
276 		p_cid->abs.stats_id = p_cid->rel.stats_id;
277 	}
278 
279 out:
280 	/* VF-images have provided the qid_usage_idx on their own.
281 	 * Otherwise, we need to allocate a unique one.
282 	 */
283 	if (!p_vf_params) {
284 		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
285 			goto fail;
286 	} else {
287 		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
288 	}
289 
290 	DP_VERBOSE(p_hwfn,
291 		   QED_MSG_SP,
292 		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
293 		   p_cid->opaque_fid,
294 		   p_cid->cid,
295 		   p_cid->rel.vport_id,
296 		   p_cid->abs.vport_id,
297 		   p_cid->rel.queue_id,
298 		   p_cid->qid_usage_idx,
299 		   p_cid->abs.queue_id,
300 		   p_cid->rel.stats_id,
301 		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
302 
303 	return p_cid;
304 
305 fail:
306 	vfree(p_cid);
307 	return NULL;
308 }
309 
310 struct qed_queue_cid *
311 qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
312 		     u16 opaque_fid,
313 		     struct qed_queue_start_common_params *p_params,
314 		     bool b_is_rx,
315 		     struct qed_queue_cid_vf_params *p_vf_params)
316 {
317 	struct qed_queue_cid *p_cid;
318 	u8 vfid = QED_CXT_PF_CID;
319 	bool b_legacy_vf = false;
320 	u32 cid = 0;
321 
322 	/* In case of legacy VFs, The CID can be derived from the additional
323 	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
324 	 * use the vf_qid for this purpose as well.
325 	 */
326 	if (p_vf_params) {
327 		vfid = p_vf_params->vfid;
328 
329 		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
330 			b_legacy_vf = true;
331 			cid = p_vf_params->vf_qid;
332 		}
333 	}
334 
335 	/* Get a unique firmware CID for this queue, in case it's a PF.
336 	 * VF's don't need a CID as the queue configuration will be done
337 	 * by PF.
338 	 */
339 	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
340 		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
341 					 &cid, vfid)) {
342 			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
343 			return NULL;
344 		}
345 	}
346 
347 	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
348 				      p_params, b_is_rx, p_vf_params);
349 	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
350 		_qed_cxt_release_cid(p_hwfn, cid, vfid);
351 
352 	return p_cid;
353 }
354 
355 static struct qed_queue_cid *
356 qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
357 			u16 opaque_fid,
358 			bool b_is_rx,
359 			struct qed_queue_start_common_params *p_params)
360 {
361 	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
362 				    NULL);
363 }
364 
365 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
366 			   struct qed_sp_vport_start_params *p_params)
367 {
368 	struct vport_start_ramrod_data *p_ramrod = NULL;
369 	struct qed_spq_entry *p_ent =  NULL;
370 	struct qed_sp_init_data init_data;
371 	u8 abs_vport_id = 0;
372 	int rc = -EINVAL;
373 	u16 rx_mode = 0;
374 
375 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
376 	if (rc)
377 		return rc;
378 
379 	memset(&init_data, 0, sizeof(init_data));
380 	init_data.cid = qed_spq_get_cid(p_hwfn);
381 	init_data.opaque_fid = p_params->opaque_fid;
382 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
383 
384 	rc = qed_sp_init_request(p_hwfn, &p_ent,
385 				 ETH_RAMROD_VPORT_START,
386 				 PROTOCOLID_ETH, &init_data);
387 	if (rc)
388 		return rc;
389 
390 	p_ramrod		= &p_ent->ramrod.vport_start;
391 	p_ramrod->vport_id	= abs_vport_id;
392 
393 	p_ramrod->mtu			= cpu_to_le16(p_params->mtu);
394 	p_ramrod->handle_ptp_pkts	= p_params->handle_ptp_pkts;
395 	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
396 	p_ramrod->drop_ttl0_en		= p_params->drop_ttl0;
397 	p_ramrod->untagged		= p_params->only_untagged;
398 
399 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
400 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
401 
402 	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
403 
404 	/* TPA related fields */
405 	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
406 
407 	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
408 
409 	switch (p_params->tpa_mode) {
410 	case QED_TPA_MODE_GRO:
411 		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
412 		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
413 		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
414 		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
415 		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
416 		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
417 		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
418 		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
419 		break;
420 	default:
421 		break;
422 	}
423 
424 	p_ramrod->tx_switching_en = p_params->tx_switching;
425 
426 	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
427 	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
428 
429 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
430 	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
431 						  p_params->concrete_fid);
432 
433 	return qed_spq_post(p_hwfn, p_ent, NULL);
434 }
435 
436 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
437 			      struct qed_sp_vport_start_params *p_params)
438 {
439 	if (IS_VF(p_hwfn->cdev)) {
440 		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
441 					     p_params->mtu,
442 					     p_params->remove_inner_vlan,
443 					     p_params->tpa_mode,
444 					     p_params->max_buffers_per_cqe,
445 					     p_params->only_untagged);
446 	}
447 
448 	return qed_sp_eth_vport_start(p_hwfn, p_params);
449 }
450 
451 static int
452 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
453 			struct vport_update_ramrod_data *p_ramrod,
454 			struct qed_rss_params *p_rss)
455 {
456 	struct eth_vport_rss_config *p_config;
457 	u16 capabilities = 0;
458 	int i, table_size;
459 	int rc = 0;
460 
461 	if (!p_rss) {
462 		p_ramrod->common.update_rss_flg = 0;
463 		return rc;
464 	}
465 	p_config = &p_ramrod->rss_config;
466 
467 	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
468 
469 	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
470 	if (rc)
471 		return rc;
472 
473 	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
474 	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
475 	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
476 	p_config->update_rss_key = p_rss->update_rss_key;
477 
478 	p_config->rss_mode = p_rss->rss_enable ?
479 			     ETH_VPORT_RSS_MODE_REGULAR :
480 			     ETH_VPORT_RSS_MODE_DISABLED;
481 
482 	SET_FIELD(capabilities,
483 		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
484 		  !!(p_rss->rss_caps & QED_RSS_IPV4));
485 	SET_FIELD(capabilities,
486 		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
487 		  !!(p_rss->rss_caps & QED_RSS_IPV6));
488 	SET_FIELD(capabilities,
489 		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
490 		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
491 	SET_FIELD(capabilities,
492 		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
493 		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
494 	SET_FIELD(capabilities,
495 		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
496 		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
497 	SET_FIELD(capabilities,
498 		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
499 		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
500 	p_config->tbl_size = p_rss->rss_table_size_log;
501 
502 	p_config->capabilities = cpu_to_le16(capabilities);
503 
504 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
505 		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
506 		   p_ramrod->common.update_rss_flg,
507 		   p_config->rss_mode,
508 		   p_config->update_rss_capabilities,
509 		   p_config->capabilities,
510 		   p_config->update_rss_ind_table, p_config->update_rss_key);
511 
512 	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
513 			   1 << p_config->tbl_size);
514 	for (i = 0; i < table_size; i++) {
515 		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
516 
517 		if (!p_queue)
518 			return -EINVAL;
519 
520 		p_config->indirection_table[i] =
521 		    cpu_to_le16(p_queue->abs.queue_id);
522 	}
523 
524 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
525 		   "Configured RSS indirection table [%d entries]:\n",
526 		   table_size);
527 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
528 		DP_VERBOSE(p_hwfn,
529 			   NETIF_MSG_IFUP,
530 			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
531 			   le16_to_cpu(p_config->indirection_table[i]),
532 			   le16_to_cpu(p_config->indirection_table[i + 1]),
533 			   le16_to_cpu(p_config->indirection_table[i + 2]),
534 			   le16_to_cpu(p_config->indirection_table[i + 3]),
535 			   le16_to_cpu(p_config->indirection_table[i + 4]),
536 			   le16_to_cpu(p_config->indirection_table[i + 5]),
537 			   le16_to_cpu(p_config->indirection_table[i + 6]),
538 			   le16_to_cpu(p_config->indirection_table[i + 7]),
539 			   le16_to_cpu(p_config->indirection_table[i + 8]),
540 			   le16_to_cpu(p_config->indirection_table[i + 9]),
541 			   le16_to_cpu(p_config->indirection_table[i + 10]),
542 			   le16_to_cpu(p_config->indirection_table[i + 11]),
543 			   le16_to_cpu(p_config->indirection_table[i + 12]),
544 			   le16_to_cpu(p_config->indirection_table[i + 13]),
545 			   le16_to_cpu(p_config->indirection_table[i + 14]),
546 			   le16_to_cpu(p_config->indirection_table[i + 15]));
547 	}
548 
549 	for (i = 0; i < 10; i++)
550 		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
551 
552 	return rc;
553 }
554 
555 static void
556 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
557 			  struct vport_update_ramrod_data *p_ramrod,
558 			  struct qed_filter_accept_flags accept_flags)
559 {
560 	p_ramrod->common.update_rx_mode_flg =
561 		accept_flags.update_rx_mode_config;
562 
563 	p_ramrod->common.update_tx_mode_flg =
564 		accept_flags.update_tx_mode_config;
565 
566 	/* Set Rx mode accept flags */
567 	if (p_ramrod->common.update_rx_mode_flg) {
568 		u8 accept_filter = accept_flags.rx_accept_filter;
569 		u16 state = 0;
570 
571 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
572 			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
573 			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
574 
575 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
576 			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
577 
578 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
579 			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
580 			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
581 
582 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
583 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
584 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
585 
586 		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
587 			  !!(accept_filter & QED_ACCEPT_BCAST));
588 
589 		p_ramrod->rx_mode.state = cpu_to_le16(state);
590 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
591 			   "p_ramrod->rx_mode.state = 0x%x\n", state);
592 	}
593 
594 	/* Set Tx mode accept flags */
595 	if (p_ramrod->common.update_tx_mode_flg) {
596 		u8 accept_filter = accept_flags.tx_accept_filter;
597 		u16 state = 0;
598 
599 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
600 			  !!(accept_filter & QED_ACCEPT_NONE));
601 
602 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
603 			  !!(accept_filter & QED_ACCEPT_NONE));
604 
605 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
606 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
607 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
608 
609 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
610 			  !!(accept_filter & QED_ACCEPT_BCAST));
611 
612 		p_ramrod->tx_mode.state = cpu_to_le16(state);
613 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
614 			   "p_ramrod->tx_mode.state = 0x%x\n", state);
615 	}
616 }
617 
618 static void
619 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
620 			    struct vport_update_ramrod_data *p_ramrod,
621 			    struct qed_sge_tpa_params *p_params)
622 {
623 	struct eth_vport_tpa_param *p_tpa;
624 
625 	if (!p_params) {
626 		p_ramrod->common.update_tpa_param_flg = 0;
627 		p_ramrod->common.update_tpa_en_flg = 0;
628 		p_ramrod->common.update_tpa_param_flg = 0;
629 		return;
630 	}
631 
632 	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
633 	p_tpa = &p_ramrod->tpa_param;
634 	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
635 	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
636 	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
637 	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
638 
639 	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
640 	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
641 	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
642 	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
643 	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
644 	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
645 	p_tpa->tpa_max_size = p_params->tpa_max_size;
646 	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
647 	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
648 }
649 
650 static void
651 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
652 			struct vport_update_ramrod_data *p_ramrod,
653 			struct qed_sp_vport_update_params *p_params)
654 {
655 	int i;
656 
657 	memset(&p_ramrod->approx_mcast.bins, 0,
658 	       sizeof(p_ramrod->approx_mcast.bins));
659 
660 	if (!p_params->update_approx_mcast_flg)
661 		return;
662 
663 	p_ramrod->common.update_approx_mcast_flg = 1;
664 	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
665 		u32 *p_bins = (u32 *)p_params->bins;
666 
667 		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
668 	}
669 }
670 
671 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
672 			struct qed_sp_vport_update_params *p_params,
673 			enum spq_mode comp_mode,
674 			struct qed_spq_comp_cb *p_comp_data)
675 {
676 	struct qed_rss_params *p_rss_params = p_params->rss_params;
677 	struct vport_update_ramrod_data_cmn *p_cmn;
678 	struct qed_sp_init_data init_data;
679 	struct vport_update_ramrod_data *p_ramrod = NULL;
680 	struct qed_spq_entry *p_ent = NULL;
681 	u8 abs_vport_id = 0, val;
682 	int rc = -EINVAL;
683 
684 	if (IS_VF(p_hwfn->cdev)) {
685 		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
686 		return rc;
687 	}
688 
689 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
690 	if (rc)
691 		return rc;
692 
693 	memset(&init_data, 0, sizeof(init_data));
694 	init_data.cid = qed_spq_get_cid(p_hwfn);
695 	init_data.opaque_fid = p_params->opaque_fid;
696 	init_data.comp_mode = comp_mode;
697 	init_data.p_comp_data = p_comp_data;
698 
699 	rc = qed_sp_init_request(p_hwfn, &p_ent,
700 				 ETH_RAMROD_VPORT_UPDATE,
701 				 PROTOCOLID_ETH, &init_data);
702 	if (rc)
703 		return rc;
704 
705 	/* Copy input params to ramrod according to FW struct */
706 	p_ramrod = &p_ent->ramrod.vport_update;
707 	p_cmn = &p_ramrod->common;
708 
709 	p_cmn->vport_id = abs_vport_id;
710 	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
711 	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
712 	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
713 	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
714 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
715 	val = p_params->update_accept_any_vlan_flg;
716 	p_cmn->update_accept_any_vlan_flg = val;
717 
718 	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
719 	val = p_params->update_inner_vlan_removal_flg;
720 	p_cmn->update_inner_vlan_removal_en_flg = val;
721 
722 	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
723 	val = p_params->update_default_vlan_enable_flg;
724 	p_cmn->update_default_vlan_en_flg = val;
725 
726 	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
727 	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
728 
729 	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
730 
731 	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
732 	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
733 
734 	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
735 	val = p_params->update_anti_spoofing_en_flg;
736 	p_ramrod->common.update_anti_spoofing_en_flg = val;
737 
738 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
739 	if (rc) {
740 		/* Return spq entry which is taken in qed_sp_init_request()*/
741 		qed_spq_return_entry(p_hwfn, p_ent);
742 		return rc;
743 	}
744 
745 	/* Update mcast bins for VFs, PF doesn't use this functionality */
746 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
747 
748 	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
749 	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
750 	return qed_spq_post(p_hwfn, p_ent, NULL);
751 }
752 
753 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
754 {
755 	struct vport_stop_ramrod_data *p_ramrod;
756 	struct qed_sp_init_data init_data;
757 	struct qed_spq_entry *p_ent;
758 	u8 abs_vport_id = 0;
759 	int rc;
760 
761 	if (IS_VF(p_hwfn->cdev))
762 		return qed_vf_pf_vport_stop(p_hwfn);
763 
764 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
765 	if (rc)
766 		return rc;
767 
768 	memset(&init_data, 0, sizeof(init_data));
769 	init_data.cid = qed_spq_get_cid(p_hwfn);
770 	init_data.opaque_fid = opaque_fid;
771 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
772 
773 	rc = qed_sp_init_request(p_hwfn, &p_ent,
774 				 ETH_RAMROD_VPORT_STOP,
775 				 PROTOCOLID_ETH, &init_data);
776 	if (rc)
777 		return rc;
778 
779 	p_ramrod = &p_ent->ramrod.vport_stop;
780 	p_ramrod->vport_id = abs_vport_id;
781 
782 	return qed_spq_post(p_hwfn, p_ent, NULL);
783 }
784 
785 static int
786 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
787 		       struct qed_filter_accept_flags *p_accept_flags)
788 {
789 	struct qed_sp_vport_update_params s_params;
790 
791 	memset(&s_params, 0, sizeof(s_params));
792 	memcpy(&s_params.accept_flags, p_accept_flags,
793 	       sizeof(struct qed_filter_accept_flags));
794 
795 	return qed_vf_pf_vport_update(p_hwfn, &s_params);
796 }
797 
798 static int qed_filter_accept_cmd(struct qed_dev *cdev,
799 				 u8 vport,
800 				 struct qed_filter_accept_flags accept_flags,
801 				 u8 update_accept_any_vlan,
802 				 u8 accept_any_vlan,
803 				 enum spq_mode comp_mode,
804 				 struct qed_spq_comp_cb *p_comp_data)
805 {
806 	struct qed_sp_vport_update_params vport_update_params;
807 	int i, rc;
808 
809 	/* Prepare and send the vport rx_mode change */
810 	memset(&vport_update_params, 0, sizeof(vport_update_params));
811 	vport_update_params.vport_id = vport;
812 	vport_update_params.accept_flags = accept_flags;
813 	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
814 	vport_update_params.accept_any_vlan = accept_any_vlan;
815 
816 	for_each_hwfn(cdev, i) {
817 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
818 
819 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
820 
821 		if (IS_VF(cdev)) {
822 			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
823 			if (rc)
824 				return rc;
825 			continue;
826 		}
827 
828 		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
829 					 comp_mode, p_comp_data);
830 		if (rc) {
831 			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
832 			return rc;
833 		}
834 
835 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
836 			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
837 			   accept_flags.rx_accept_filter,
838 			   accept_flags.tx_accept_filter);
839 		if (update_accept_any_vlan)
840 			DP_VERBOSE(p_hwfn, QED_MSG_SP,
841 				   "accept_any_vlan=%d configured\n",
842 				   accept_any_vlan);
843 	}
844 
845 	return 0;
846 }
847 
848 int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
849 			     struct qed_queue_cid *p_cid,
850 			     u16 bd_max_bytes,
851 			     dma_addr_t bd_chain_phys_addr,
852 			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
853 {
854 	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
855 	struct qed_spq_entry *p_ent = NULL;
856 	struct qed_sp_init_data init_data;
857 	int rc = -EINVAL;
858 
859 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
860 		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
861 		   p_cid->opaque_fid, p_cid->cid,
862 		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
863 
864 	/* Get SPQ entry */
865 	memset(&init_data, 0, sizeof(init_data));
866 	init_data.cid = p_cid->cid;
867 	init_data.opaque_fid = p_cid->opaque_fid;
868 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
869 
870 	rc = qed_sp_init_request(p_hwfn, &p_ent,
871 				 ETH_RAMROD_RX_QUEUE_START,
872 				 PROTOCOLID_ETH, &init_data);
873 	if (rc)
874 		return rc;
875 
876 	p_ramrod = &p_ent->ramrod.rx_queue_start;
877 
878 	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
879 	p_ramrod->sb_index = p_cid->sb_idx;
880 	p_ramrod->vport_id = p_cid->abs.vport_id;
881 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
882 	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
883 	p_ramrod->complete_cqe_flg = 0;
884 	p_ramrod->complete_event_flg = 1;
885 
886 	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
887 	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
888 
889 	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
890 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
891 
892 	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
893 		bool b_legacy_vf = !!(p_cid->vf_legacy &
894 				      QED_QCID_LEGACY_VF_RX_PROD);
895 
896 		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
897 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
898 			   "Queue%s is meant for VF rxq[%02x]\n",
899 			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
900 		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
901 	}
902 
903 	return qed_spq_post(p_hwfn, p_ent, NULL);
904 }
905 
906 static int
907 qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
908 			  struct qed_queue_cid *p_cid,
909 			  u16 bd_max_bytes,
910 			  dma_addr_t bd_chain_phys_addr,
911 			  dma_addr_t cqe_pbl_addr,
912 			  u16 cqe_pbl_size, void __iomem **pp_prod)
913 {
914 	u32 init_prod_val = 0;
915 
916 	*pp_prod = p_hwfn->regview +
917 		   GTT_BAR0_MAP_REG_MSDM_RAM +
918 		    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
919 
920 	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
921 	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
922 			  (u32 *)(&init_prod_val));
923 
924 	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
925 					bd_max_bytes,
926 					bd_chain_phys_addr,
927 					cqe_pbl_addr, cqe_pbl_size);
928 }
929 
930 static int
931 qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
932 		       u16 opaque_fid,
933 		       struct qed_queue_start_common_params *p_params,
934 		       u16 bd_max_bytes,
935 		       dma_addr_t bd_chain_phys_addr,
936 		       dma_addr_t cqe_pbl_addr,
937 		       u16 cqe_pbl_size,
938 		       struct qed_rxq_start_ret_params *p_ret_params)
939 {
940 	struct qed_queue_cid *p_cid;
941 	int rc;
942 
943 	/* Allocate a CID for the queue */
944 	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
945 	if (!p_cid)
946 		return -ENOMEM;
947 
948 	if (IS_PF(p_hwfn->cdev)) {
949 		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
950 					       bd_max_bytes,
951 					       bd_chain_phys_addr,
952 					       cqe_pbl_addr, cqe_pbl_size,
953 					       &p_ret_params->p_prod);
954 	} else {
955 		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
956 					 bd_max_bytes,
957 					 bd_chain_phys_addr,
958 					 cqe_pbl_addr,
959 					 cqe_pbl_size, &p_ret_params->p_prod);
960 	}
961 
962 	/* Provide the caller with a reference to as handler */
963 	if (rc)
964 		qed_eth_queue_cid_release(p_hwfn, p_cid);
965 	else
966 		p_ret_params->p_handle = (void *)p_cid;
967 
968 	return rc;
969 }
970 
971 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
972 				void **pp_rxq_handles,
973 				u8 num_rxqs,
974 				u8 complete_cqe_flg,
975 				u8 complete_event_flg,
976 				enum spq_mode comp_mode,
977 				struct qed_spq_comp_cb *p_comp_data)
978 {
979 	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
980 	struct qed_spq_entry *p_ent = NULL;
981 	struct qed_sp_init_data init_data;
982 	struct qed_queue_cid *p_cid;
983 	int rc = -EINVAL;
984 	u8 i;
985 
986 	memset(&init_data, 0, sizeof(init_data));
987 	init_data.comp_mode = comp_mode;
988 	init_data.p_comp_data = p_comp_data;
989 
990 	for (i = 0; i < num_rxqs; i++) {
991 		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
992 
993 		/* Get SPQ entry */
994 		init_data.cid = p_cid->cid;
995 		init_data.opaque_fid = p_cid->opaque_fid;
996 
997 		rc = qed_sp_init_request(p_hwfn, &p_ent,
998 					 ETH_RAMROD_RX_QUEUE_UPDATE,
999 					 PROTOCOLID_ETH, &init_data);
1000 		if (rc)
1001 			return rc;
1002 
1003 		p_ramrod = &p_ent->ramrod.rx_queue_update;
1004 		p_ramrod->vport_id = p_cid->abs.vport_id;
1005 
1006 		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
1007 		p_ramrod->complete_cqe_flg = complete_cqe_flg;
1008 		p_ramrod->complete_event_flg = complete_event_flg;
1009 
1010 		rc = qed_spq_post(p_hwfn, p_ent, NULL);
1011 		if (rc)
1012 			return rc;
1013 	}
1014 
1015 	return rc;
1016 }
1017 
1018 static int
1019 qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
1020 			 struct qed_queue_cid *p_cid,
1021 			 bool b_eq_completion_only, bool b_cqe_completion)
1022 {
1023 	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
1024 	struct qed_spq_entry *p_ent = NULL;
1025 	struct qed_sp_init_data init_data;
1026 	int rc;
1027 
1028 	memset(&init_data, 0, sizeof(init_data));
1029 	init_data.cid = p_cid->cid;
1030 	init_data.opaque_fid = p_cid->opaque_fid;
1031 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1032 
1033 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1034 				 ETH_RAMROD_RX_QUEUE_STOP,
1035 				 PROTOCOLID_ETH, &init_data);
1036 	if (rc)
1037 		return rc;
1038 
1039 	p_ramrod = &p_ent->ramrod.rx_queue_stop;
1040 	p_ramrod->vport_id = p_cid->abs.vport_id;
1041 	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
1042 
1043 	/* Cleaning the queue requires the completion to arrive there.
1044 	 * In addition, VFs require the answer to come as eqe to PF.
1045 	 */
1046 	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
1047 				      !b_eq_completion_only) ||
1048 				     b_cqe_completion;
1049 	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
1050 				       b_eq_completion_only;
1051 
1052 	return qed_spq_post(p_hwfn, p_ent, NULL);
1053 }
1054 
1055 int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
1056 			  void *p_rxq,
1057 			  bool eq_completion_only, bool cqe_completion)
1058 {
1059 	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
1060 	int rc = -EINVAL;
1061 
1062 	if (IS_PF(p_hwfn->cdev))
1063 		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
1064 					      eq_completion_only,
1065 					      cqe_completion);
1066 	else
1067 		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
1068 
1069 	if (!rc)
1070 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1071 	return rc;
1072 }
1073 
1074 int
1075 qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
1076 			 struct qed_queue_cid *p_cid,
1077 			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
1078 {
1079 	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
1080 	struct qed_spq_entry *p_ent = NULL;
1081 	struct qed_sp_init_data init_data;
1082 	int rc = -EINVAL;
1083 
1084 	/* Get SPQ entry */
1085 	memset(&init_data, 0, sizeof(init_data));
1086 	init_data.cid = p_cid->cid;
1087 	init_data.opaque_fid = p_cid->opaque_fid;
1088 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1089 
1090 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1091 				 ETH_RAMROD_TX_QUEUE_START,
1092 				 PROTOCOLID_ETH, &init_data);
1093 	if (rc)
1094 		return rc;
1095 
1096 	p_ramrod = &p_ent->ramrod.tx_queue_start;
1097 	p_ramrod->vport_id = p_cid->abs.vport_id;
1098 
1099 	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
1100 	p_ramrod->sb_index = p_cid->sb_idx;
1101 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
1102 
1103 	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
1104 	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
1105 
1106 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1107 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
1108 
1109 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1110 
1111 	return qed_spq_post(p_hwfn, p_ent, NULL);
1112 }
1113 
1114 static int
1115 qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
1116 			  struct qed_queue_cid *p_cid,
1117 			  u8 tc,
1118 			  dma_addr_t pbl_addr,
1119 			  u16 pbl_size, void __iomem **pp_doorbell)
1120 {
1121 	int rc;
1122 
1123 
1124 	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
1125 				      pbl_addr, pbl_size,
1126 				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
1127 	if (rc)
1128 		return rc;
1129 
1130 	/* Provide the caller with the necessary return values */
1131 	*pp_doorbell = p_hwfn->doorbells +
1132 		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
1133 
1134 	return 0;
1135 }
1136 
1137 static int
1138 qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
1139 		       u16 opaque_fid,
1140 		       struct qed_queue_start_common_params *p_params,
1141 		       u8 tc,
1142 		       dma_addr_t pbl_addr,
1143 		       u16 pbl_size,
1144 		       struct qed_txq_start_ret_params *p_ret_params)
1145 {
1146 	struct qed_queue_cid *p_cid;
1147 	int rc;
1148 
1149 	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
1150 	if (!p_cid)
1151 		return -EINVAL;
1152 
1153 	if (IS_PF(p_hwfn->cdev))
1154 		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
1155 					       pbl_addr, pbl_size,
1156 					       &p_ret_params->p_doorbell);
1157 	else
1158 		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
1159 					 pbl_addr, pbl_size,
1160 					 &p_ret_params->p_doorbell);
1161 
1162 	if (rc)
1163 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1164 	else
1165 		p_ret_params->p_handle = (void *)p_cid;
1166 
1167 	return rc;
1168 }
1169 
1170 static int
1171 qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
1172 {
1173 	struct qed_spq_entry *p_ent = NULL;
1174 	struct qed_sp_init_data init_data;
1175 	int rc;
1176 
1177 	memset(&init_data, 0, sizeof(init_data));
1178 	init_data.cid = p_cid->cid;
1179 	init_data.opaque_fid = p_cid->opaque_fid;
1180 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1181 
1182 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1183 				 ETH_RAMROD_TX_QUEUE_STOP,
1184 				 PROTOCOLID_ETH, &init_data);
1185 	if (rc)
1186 		return rc;
1187 
1188 	return qed_spq_post(p_hwfn, p_ent, NULL);
1189 }
1190 
1191 int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
1192 {
1193 	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
1194 	int rc;
1195 
1196 	if (IS_PF(p_hwfn->cdev))
1197 		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
1198 	else
1199 		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
1200 
1201 	if (!rc)
1202 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1203 	return rc;
1204 }
1205 
1206 static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
1207 {
1208 	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
1209 
1210 	switch (opcode) {
1211 	case QED_FILTER_ADD:
1212 		action = ETH_FILTER_ACTION_ADD;
1213 		break;
1214 	case QED_FILTER_REMOVE:
1215 		action = ETH_FILTER_ACTION_REMOVE;
1216 		break;
1217 	case QED_FILTER_FLUSH:
1218 		action = ETH_FILTER_ACTION_REMOVE_ALL;
1219 		break;
1220 	default:
1221 		action = MAX_ETH_FILTER_ACTION;
1222 	}
1223 
1224 	return action;
1225 }
1226 
1227 static int
1228 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
1229 			u16 opaque_fid,
1230 			struct qed_filter_ucast *p_filter_cmd,
1231 			struct vport_filter_update_ramrod_data **pp_ramrod,
1232 			struct qed_spq_entry **pp_ent,
1233 			enum spq_mode comp_mode,
1234 			struct qed_spq_comp_cb *p_comp_data)
1235 {
1236 	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1237 	struct vport_filter_update_ramrod_data *p_ramrod;
1238 	struct eth_filter_cmd *p_first_filter;
1239 	struct eth_filter_cmd *p_second_filter;
1240 	struct qed_sp_init_data init_data;
1241 	enum eth_filter_action action;
1242 	int rc;
1243 
1244 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1245 			  &vport_to_remove_from);
1246 	if (rc)
1247 		return rc;
1248 
1249 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1250 			  &vport_to_add_to);
1251 	if (rc)
1252 		return rc;
1253 
1254 	/* Get SPQ entry */
1255 	memset(&init_data, 0, sizeof(init_data));
1256 	init_data.cid = qed_spq_get_cid(p_hwfn);
1257 	init_data.opaque_fid = opaque_fid;
1258 	init_data.comp_mode = comp_mode;
1259 	init_data.p_comp_data = p_comp_data;
1260 
1261 	rc = qed_sp_init_request(p_hwfn, pp_ent,
1262 				 ETH_RAMROD_FILTERS_UPDATE,
1263 				 PROTOCOLID_ETH, &init_data);
1264 	if (rc)
1265 		return rc;
1266 
1267 	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1268 	p_ramrod = *pp_ramrod;
1269 	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1270 	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1271 
1272 	switch (p_filter_cmd->opcode) {
1273 	case QED_FILTER_REPLACE:
1274 	case QED_FILTER_MOVE:
1275 		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
1276 	default:
1277 		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
1278 	}
1279 
1280 	p_first_filter	= &p_ramrod->filter_cmds[0];
1281 	p_second_filter = &p_ramrod->filter_cmds[1];
1282 
1283 	switch (p_filter_cmd->type) {
1284 	case QED_FILTER_MAC:
1285 		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
1286 	case QED_FILTER_VLAN:
1287 		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
1288 	case QED_FILTER_MAC_VLAN:
1289 		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
1290 	case QED_FILTER_INNER_MAC:
1291 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
1292 	case QED_FILTER_INNER_VLAN:
1293 		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
1294 	case QED_FILTER_INNER_PAIR:
1295 		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
1296 	case QED_FILTER_INNER_MAC_VNI_PAIR:
1297 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1298 		break;
1299 	case QED_FILTER_MAC_VNI_PAIR:
1300 		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
1301 	case QED_FILTER_VNI:
1302 		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
1303 	}
1304 
1305 	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1306 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1307 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1308 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1309 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1310 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
1311 		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
1312 				    &p_first_filter->mac_mid,
1313 				    &p_first_filter->mac_lsb,
1314 				    (u8 *)p_filter_cmd->mac);
1315 	}
1316 
1317 	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1318 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1319 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1320 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1321 		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
1322 
1323 	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1324 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1325 	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1326 		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
1327 
1328 	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
1329 		p_second_filter->type = p_first_filter->type;
1330 		p_second_filter->mac_msb = p_first_filter->mac_msb;
1331 		p_second_filter->mac_mid = p_first_filter->mac_mid;
1332 		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1333 		p_second_filter->vlan_id = p_first_filter->vlan_id;
1334 		p_second_filter->vni = p_first_filter->vni;
1335 
1336 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1337 
1338 		p_first_filter->vport_id = vport_to_remove_from;
1339 
1340 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1341 		p_second_filter->vport_id = vport_to_add_to;
1342 	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
1343 		p_first_filter->vport_id = vport_to_add_to;
1344 		memcpy(p_second_filter, p_first_filter,
1345 		       sizeof(*p_second_filter));
1346 		p_first_filter->action	= ETH_FILTER_ACTION_REMOVE_ALL;
1347 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1348 	} else {
1349 		action = qed_filter_action(p_filter_cmd->opcode);
1350 
1351 		if (action == MAX_ETH_FILTER_ACTION) {
1352 			DP_NOTICE(p_hwfn,
1353 				  "%d is not supported yet\n",
1354 				  p_filter_cmd->opcode);
1355 			return -EINVAL;
1356 		}
1357 
1358 		p_first_filter->action = action;
1359 		p_first_filter->vport_id = (p_filter_cmd->opcode ==
1360 					    QED_FILTER_REMOVE) ?
1361 					   vport_to_remove_from :
1362 					   vport_to_add_to;
1363 	}
1364 
1365 	return 0;
1366 }
1367 
1368 int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
1369 			    u16 opaque_fid,
1370 			    struct qed_filter_ucast *p_filter_cmd,
1371 			    enum spq_mode comp_mode,
1372 			    struct qed_spq_comp_cb *p_comp_data)
1373 {
1374 	struct vport_filter_update_ramrod_data	*p_ramrod	= NULL;
1375 	struct qed_spq_entry			*p_ent		= NULL;
1376 	struct eth_filter_cmd_header		*p_header;
1377 	int					rc;
1378 
1379 	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1380 				     &p_ramrod, &p_ent,
1381 				     comp_mode, p_comp_data);
1382 	if (rc) {
1383 		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1384 		return rc;
1385 	}
1386 	p_header = &p_ramrod->filter_cmd_hdr;
1387 	p_header->assert_on_error = p_filter_cmd->assert_on_error;
1388 
1389 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1390 	if (rc) {
1391 		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
1392 		return rc;
1393 	}
1394 
1395 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1396 		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1397 		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1398 		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1399 		   "REMOVE" :
1400 		   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1401 		    "MOVE" : "REPLACE")),
1402 		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1403 		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1404 		    "VLAN" : "MAC & VLAN"),
1405 		   p_ramrod->filter_cmd_hdr.cmd_cnt,
1406 		   p_filter_cmd->is_rx_filter,
1407 		   p_filter_cmd->is_tx_filter);
1408 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1409 		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1410 		   p_filter_cmd->vport_to_add_to,
1411 		   p_filter_cmd->vport_to_remove_from,
1412 		   p_filter_cmd->mac[0],
1413 		   p_filter_cmd->mac[1],
1414 		   p_filter_cmd->mac[2],
1415 		   p_filter_cmd->mac[3],
1416 		   p_filter_cmd->mac[4],
1417 		   p_filter_cmd->mac[5],
1418 		   p_filter_cmd->vlan);
1419 
1420 	return 0;
1421 }
1422 
1423 /*******************************************************************************
1424  * Description:
1425  *         Calculates crc 32 on a buffer
1426  *         Note: crc32_length MUST be aligned to 8
1427  * Return:
1428  ******************************************************************************/
1429 static u32 qed_calc_crc32c(u8 *crc32_packet,
1430 			   u32 crc32_length, u32 crc32_seed, u8 complement)
1431 {
1432 	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1433 	u8 msb = 0, current_byte = 0;
1434 
1435 	if ((!crc32_packet) ||
1436 	    (crc32_length == 0) ||
1437 	    ((crc32_length % 8) != 0))
1438 		return crc32_result;
1439 	for (byte = 0; byte < crc32_length; byte++) {
1440 		current_byte = crc32_packet[byte];
1441 		for (bit = 0; bit < 8; bit++) {
1442 			msb = (u8)(crc32_result >> 31);
1443 			crc32_result = crc32_result << 1;
1444 			if (msb != (0x1 & (current_byte >> bit))) {
1445 				crc32_result = crc32_result ^ CRC32_POLY;
1446 				crc32_result |= 1; /*crc32_result[0] = 1;*/
1447 			}
1448 		}
1449 	}
1450 	return crc32_result;
1451 }
1452 
1453 static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
1454 {
1455 	u32 packet_buf[2] = { 0 };
1456 
1457 	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1458 	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1459 }
1460 
1461 u8 qed_mcast_bin_from_mac(u8 *mac)
1462 {
1463 	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1464 				mac, ETH_ALEN);
1465 
1466 	return crc & 0xff;
1467 }
1468 
1469 static int
1470 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1471 			u16 opaque_fid,
1472 			struct qed_filter_mcast *p_filter_cmd,
1473 			enum spq_mode comp_mode,
1474 			struct qed_spq_comp_cb *p_comp_data)
1475 {
1476 	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1477 	struct vport_update_ramrod_data *p_ramrod = NULL;
1478 	struct qed_spq_entry *p_ent = NULL;
1479 	struct qed_sp_init_data init_data;
1480 	u8 abs_vport_id = 0;
1481 	int rc, i;
1482 
1483 	if (p_filter_cmd->opcode == QED_FILTER_ADD)
1484 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1485 				  &abs_vport_id);
1486 	else
1487 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1488 				  &abs_vport_id);
1489 	if (rc)
1490 		return rc;
1491 
1492 	/* Get SPQ entry */
1493 	memset(&init_data, 0, sizeof(init_data));
1494 	init_data.cid = qed_spq_get_cid(p_hwfn);
1495 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1496 	init_data.comp_mode = comp_mode;
1497 	init_data.p_comp_data = p_comp_data;
1498 
1499 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1500 				 ETH_RAMROD_VPORT_UPDATE,
1501 				 PROTOCOLID_ETH, &init_data);
1502 	if (rc) {
1503 		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1504 		return rc;
1505 	}
1506 
1507 	p_ramrod = &p_ent->ramrod.vport_update;
1508 	p_ramrod->common.update_approx_mcast_flg = 1;
1509 
1510 	/* explicitly clear out the entire vector */
1511 	memset(&p_ramrod->approx_mcast.bins, 0,
1512 	       sizeof(p_ramrod->approx_mcast.bins));
1513 	memset(bins, 0, sizeof(unsigned long) *
1514 	       ETH_MULTICAST_MAC_BINS_IN_REGS);
1515 	/* filter ADD op is explicit set op and it removes
1516 	 *  any existing filters for the vport
1517 	 */
1518 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1519 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1520 			u32 bit;
1521 
1522 			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1523 			__set_bit(bit, bins);
1524 		}
1525 
1526 		/* Convert to correct endianity */
1527 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1528 			struct vport_update_ramrod_mcast *p_ramrod_bins;
1529 			u32 *p_bins = (u32 *)bins;
1530 
1531 			p_ramrod_bins = &p_ramrod->approx_mcast;
1532 			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
1533 		}
1534 	}
1535 
1536 	p_ramrod->common.vport_id = abs_vport_id;
1537 
1538 	return qed_spq_post(p_hwfn, p_ent, NULL);
1539 }
1540 
1541 static int qed_filter_mcast_cmd(struct qed_dev *cdev,
1542 				struct qed_filter_mcast *p_filter_cmd,
1543 				enum spq_mode comp_mode,
1544 				struct qed_spq_comp_cb *p_comp_data)
1545 {
1546 	int rc = 0;
1547 	int i;
1548 
1549 	/* only ADD and REMOVE operations are supported for multi-cast */
1550 	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1551 	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
1552 	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1553 		return -EINVAL;
1554 
1555 	for_each_hwfn(cdev, i) {
1556 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1557 
1558 		u16 opaque_fid;
1559 
1560 		if (IS_VF(cdev)) {
1561 			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1562 			continue;
1563 		}
1564 
1565 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1566 
1567 		rc = qed_sp_eth_filter_mcast(p_hwfn,
1568 					     opaque_fid,
1569 					     p_filter_cmd,
1570 					     comp_mode, p_comp_data);
1571 	}
1572 	return rc;
1573 }
1574 
1575 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1576 				struct qed_filter_ucast *p_filter_cmd,
1577 				enum spq_mode comp_mode,
1578 				struct qed_spq_comp_cb *p_comp_data)
1579 {
1580 	int rc = 0;
1581 	int i;
1582 
1583 	for_each_hwfn(cdev, i) {
1584 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1585 		u16 opaque_fid;
1586 
1587 		if (IS_VF(cdev)) {
1588 			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1589 			continue;
1590 		}
1591 
1592 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1593 
1594 		rc = qed_sp_eth_filter_ucast(p_hwfn,
1595 					     opaque_fid,
1596 					     p_filter_cmd,
1597 					     comp_mode, p_comp_data);
1598 		if (rc)
1599 			break;
1600 	}
1601 
1602 	return rc;
1603 }
1604 
1605 /* Statistics related code */
1606 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1607 					   u32 *p_addr,
1608 					   u32 *p_len, u16 statistics_bin)
1609 {
1610 	if (IS_PF(p_hwfn->cdev)) {
1611 		*p_addr = BAR0_MAP_REG_PSDM_RAM +
1612 		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1613 		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
1614 	} else {
1615 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1616 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1617 
1618 		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1619 		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
1620 	}
1621 }
1622 
1623 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
1624 				   struct qed_ptt *p_ptt,
1625 				   struct qed_eth_stats *p_stats,
1626 				   u16 statistics_bin)
1627 {
1628 	struct eth_pstorm_per_queue_stat pstats;
1629 	u32 pstats_addr = 0, pstats_len = 0;
1630 
1631 	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1632 				       statistics_bin);
1633 
1634 	memset(&pstats, 0, sizeof(pstats));
1635 	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1636 
1637 	p_stats->common.tx_ucast_bytes +=
1638 	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1639 	p_stats->common.tx_mcast_bytes +=
1640 	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1641 	p_stats->common.tx_bcast_bytes +=
1642 	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1643 	p_stats->common.tx_ucast_pkts +=
1644 	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1645 	p_stats->common.tx_mcast_pkts +=
1646 	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1647 	p_stats->common.tx_bcast_pkts +=
1648 	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1649 	p_stats->common.tx_err_drop_pkts +=
1650 	    HILO_64_REGPAIR(pstats.error_drop_pkts);
1651 }
1652 
1653 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
1654 				   struct qed_ptt *p_ptt,
1655 				   struct qed_eth_stats *p_stats,
1656 				   u16 statistics_bin)
1657 {
1658 	struct tstorm_per_port_stat tstats;
1659 	u32 tstats_addr, tstats_len;
1660 
1661 	if (IS_PF(p_hwfn->cdev)) {
1662 		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1663 		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1664 		tstats_len = sizeof(struct tstorm_per_port_stat);
1665 	} else {
1666 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1667 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1668 
1669 		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1670 		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1671 	}
1672 
1673 	memset(&tstats, 0, sizeof(tstats));
1674 	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1675 
1676 	p_stats->common.mftag_filter_discards +=
1677 	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
1678 	p_stats->common.mac_filter_discards +=
1679 	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1680 	p_stats->common.gft_filter_drop +=
1681 		HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
1682 }
1683 
1684 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1685 					   u32 *p_addr,
1686 					   u32 *p_len, u16 statistics_bin)
1687 {
1688 	if (IS_PF(p_hwfn->cdev)) {
1689 		*p_addr = BAR0_MAP_REG_USDM_RAM +
1690 		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1691 		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
1692 	} else {
1693 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1694 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1695 
1696 		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1697 		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
1698 	}
1699 }
1700 
1701 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
1702 				   struct qed_ptt *p_ptt,
1703 				   struct qed_eth_stats *p_stats,
1704 				   u16 statistics_bin)
1705 {
1706 	struct eth_ustorm_per_queue_stat ustats;
1707 	u32 ustats_addr = 0, ustats_len = 0;
1708 
1709 	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1710 				       statistics_bin);
1711 
1712 	memset(&ustats, 0, sizeof(ustats));
1713 	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1714 
1715 	p_stats->common.rx_ucast_bytes +=
1716 	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1717 	p_stats->common.rx_mcast_bytes +=
1718 	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1719 	p_stats->common.rx_bcast_bytes +=
1720 	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1721 	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1722 	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1723 	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1724 }
1725 
1726 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1727 					   u32 *p_addr,
1728 					   u32 *p_len, u16 statistics_bin)
1729 {
1730 	if (IS_PF(p_hwfn->cdev)) {
1731 		*p_addr = BAR0_MAP_REG_MSDM_RAM +
1732 		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1733 		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1734 	} else {
1735 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1736 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1737 
1738 		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1739 		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
1740 	}
1741 }
1742 
1743 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1744 				   struct qed_ptt *p_ptt,
1745 				   struct qed_eth_stats *p_stats,
1746 				   u16 statistics_bin)
1747 {
1748 	struct eth_mstorm_per_queue_stat mstats;
1749 	u32 mstats_addr = 0, mstats_len = 0;
1750 
1751 	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1752 				       statistics_bin);
1753 
1754 	memset(&mstats, 0, sizeof(mstats));
1755 	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1756 
1757 	p_stats->common.no_buff_discards +=
1758 	    HILO_64_REGPAIR(mstats.no_buff_discard);
1759 	p_stats->common.packet_too_big_discard +=
1760 	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
1761 	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1762 	p_stats->common.tpa_coalesced_pkts +=
1763 	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1764 	p_stats->common.tpa_coalesced_events +=
1765 	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1766 	p_stats->common.tpa_aborts_num +=
1767 	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
1768 	p_stats->common.tpa_coalesced_bytes +=
1769 	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1770 }
1771 
1772 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1773 				       struct qed_ptt *p_ptt,
1774 				       struct qed_eth_stats *p_stats)
1775 {
1776 	struct qed_eth_stats_common *p_common = &p_stats->common;
1777 	struct port_stats port_stats;
1778 	int j;
1779 
1780 	memset(&port_stats, 0, sizeof(port_stats));
1781 
1782 	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1783 			p_hwfn->mcp_info->port_addr +
1784 			offsetof(struct public_port, stats),
1785 			sizeof(port_stats));
1786 
1787 	p_common->rx_64_byte_packets += port_stats.eth.r64;
1788 	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1789 	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1790 	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1791 	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1792 	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1793 	p_common->rx_crc_errors += port_stats.eth.rfcs;
1794 	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1795 	p_common->rx_pause_frames += port_stats.eth.rxpf;
1796 	p_common->rx_pfc_frames += port_stats.eth.rxpp;
1797 	p_common->rx_align_errors += port_stats.eth.raln;
1798 	p_common->rx_carrier_errors += port_stats.eth.rfcr;
1799 	p_common->rx_oversize_packets += port_stats.eth.rovr;
1800 	p_common->rx_jabbers += port_stats.eth.rjbr;
1801 	p_common->rx_undersize_packets += port_stats.eth.rund;
1802 	p_common->rx_fragments += port_stats.eth.rfrg;
1803 	p_common->tx_64_byte_packets += port_stats.eth.t64;
1804 	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1805 	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1806 	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1807 	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1808 	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1809 	p_common->tx_pause_frames += port_stats.eth.txpf;
1810 	p_common->tx_pfc_frames += port_stats.eth.txpp;
1811 	p_common->rx_mac_bytes += port_stats.eth.rbyte;
1812 	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1813 	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1814 	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1815 	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1816 	p_common->tx_mac_bytes += port_stats.eth.tbyte;
1817 	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1818 	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1819 	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1820 	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1821 	for (j = 0; j < 8; j++) {
1822 		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1823 		p_common->brb_discards += port_stats.brb.brb_discard[j];
1824 	}
1825 
1826 	if (QED_IS_BB(p_hwfn->cdev)) {
1827 		struct qed_eth_stats_bb *p_bb = &p_stats->bb;
1828 
1829 		p_bb->rx_1519_to_1522_byte_packets +=
1830 		    port_stats.eth.u0.bb0.r1522;
1831 		p_bb->rx_1519_to_2047_byte_packets +=
1832 		    port_stats.eth.u0.bb0.r2047;
1833 		p_bb->rx_2048_to_4095_byte_packets +=
1834 		    port_stats.eth.u0.bb0.r4095;
1835 		p_bb->rx_4096_to_9216_byte_packets +=
1836 		    port_stats.eth.u0.bb0.r9216;
1837 		p_bb->rx_9217_to_16383_byte_packets +=
1838 		    port_stats.eth.u0.bb0.r16383;
1839 		p_bb->tx_1519_to_2047_byte_packets +=
1840 		    port_stats.eth.u1.bb1.t2047;
1841 		p_bb->tx_2048_to_4095_byte_packets +=
1842 		    port_stats.eth.u1.bb1.t4095;
1843 		p_bb->tx_4096_to_9216_byte_packets +=
1844 		    port_stats.eth.u1.bb1.t9216;
1845 		p_bb->tx_9217_to_16383_byte_packets +=
1846 		    port_stats.eth.u1.bb1.t16383;
1847 		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1848 		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1849 	} else {
1850 		struct qed_eth_stats_ah *p_ah = &p_stats->ah;
1851 
1852 		p_ah->rx_1519_to_max_byte_packets +=
1853 		    port_stats.eth.u0.ah0.r1519_to_max;
1854 		p_ah->tx_1519_to_max_byte_packets =
1855 		    port_stats.eth.u1.ah1.t1519_to_max;
1856 	}
1857 }
1858 
1859 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1860 				  struct qed_ptt *p_ptt,
1861 				  struct qed_eth_stats *stats,
1862 				  u16 statistics_bin, bool b_get_port_stats)
1863 {
1864 	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1865 	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1866 	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1867 	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1868 
1869 	if (b_get_port_stats && p_hwfn->mcp_info)
1870 		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1871 }
1872 
1873 static void _qed_get_vport_stats(struct qed_dev *cdev,
1874 				 struct qed_eth_stats *stats)
1875 {
1876 	u8 fw_vport = 0;
1877 	int i;
1878 
1879 	memset(stats, 0, sizeof(*stats));
1880 
1881 	for_each_hwfn(cdev, i) {
1882 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1883 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1884 						    :  NULL;
1885 
1886 		if (IS_PF(cdev)) {
1887 			/* The main vport index is relative first */
1888 			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1889 				DP_ERR(p_hwfn, "No vport available!\n");
1890 				goto out;
1891 			}
1892 		}
1893 
1894 		if (IS_PF(cdev) && !p_ptt) {
1895 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1896 			continue;
1897 		}
1898 
1899 		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1900 				      IS_PF(cdev) ? true : false);
1901 
1902 out:
1903 		if (IS_PF(cdev) && p_ptt)
1904 			qed_ptt_release(p_hwfn, p_ptt);
1905 	}
1906 }
1907 
1908 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
1909 {
1910 	u32 i;
1911 
1912 	if (!cdev) {
1913 		memset(stats, 0, sizeof(*stats));
1914 		return;
1915 	}
1916 
1917 	_qed_get_vport_stats(cdev, stats);
1918 
1919 	if (!cdev->reset_stats)
1920 		return;
1921 
1922 	/* Reduce the statistics baseline */
1923 	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1924 		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1925 }
1926 
1927 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
1928 void qed_reset_vport_stats(struct qed_dev *cdev)
1929 {
1930 	int i;
1931 
1932 	for_each_hwfn(cdev, i) {
1933 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1934 		struct eth_mstorm_per_queue_stat mstats;
1935 		struct eth_ustorm_per_queue_stat ustats;
1936 		struct eth_pstorm_per_queue_stat pstats;
1937 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1938 						    : NULL;
1939 		u32 addr = 0, len = 0;
1940 
1941 		if (IS_PF(cdev) && !p_ptt) {
1942 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1943 			continue;
1944 		}
1945 
1946 		memset(&mstats, 0, sizeof(mstats));
1947 		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1948 		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1949 
1950 		memset(&ustats, 0, sizeof(ustats));
1951 		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1952 		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1953 
1954 		memset(&pstats, 0, sizeof(pstats));
1955 		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1956 		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1957 
1958 		if (IS_PF(cdev))
1959 			qed_ptt_release(p_hwfn, p_ptt);
1960 	}
1961 
1962 	/* PORT statistics are not necessarily reset, so we need to
1963 	 * read and create a baseline for future statistics.
1964 	 */
1965 	if (!cdev->reset_stats)
1966 		DP_INFO(cdev, "Reset stats not allocated\n");
1967 	else
1968 		_qed_get_vport_stats(cdev, cdev->reset_stats);
1969 }
1970 
1971 static enum gft_profile_type
1972 qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
1973 {
1974 	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
1975 		return GFT_PROFILE_TYPE_4_TUPLE;
1976 	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
1977 		return GFT_PROFILE_TYPE_IP_DST_ADDR;
1978 	if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
1979 		return GFT_PROFILE_TYPE_IP_SRC_ADDR;
1980 	return GFT_PROFILE_TYPE_L4_DST_PORT;
1981 }
1982 
1983 void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
1984 			     struct qed_ptt *p_ptt,
1985 			     struct qed_arfs_config_params *p_cfg_params)
1986 {
1987 	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
1988 		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1989 			       p_cfg_params->tcp,
1990 			       p_cfg_params->udp,
1991 			       p_cfg_params->ipv4,
1992 			       p_cfg_params->ipv6,
1993 			       qed_arfs_mode_to_hsi(p_cfg_params->mode));
1994 		DP_VERBOSE(p_hwfn,
1995 			   QED_MSG_SP,
1996 			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
1997 			   p_cfg_params->tcp ? "Enable" : "Disable",
1998 			   p_cfg_params->udp ? "Enable" : "Disable",
1999 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
2000 			   p_cfg_params->ipv6 ? "Enable" : "Disable",
2001 			   (u32)p_cfg_params->mode);
2002 	} else {
2003 		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
2004 		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2005 	}
2006 }
2007 
2008 int
2009 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2010 				struct qed_spq_comp_cb *p_cb,
2011 				struct qed_ntuple_filter_params *p_params)
2012 {
2013 	struct rx_update_gft_filter_data *p_ramrod = NULL;
2014 	struct qed_spq_entry *p_ent = NULL;
2015 	struct qed_sp_init_data init_data;
2016 	u16 abs_rx_q_id = 0;
2017 	u8 abs_vport_id = 0;
2018 	int rc = -EINVAL;
2019 
2020 	/* Get SPQ entry */
2021 	memset(&init_data, 0, sizeof(init_data));
2022 	init_data.cid = qed_spq_get_cid(p_hwfn);
2023 
2024 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2025 
2026 	if (p_cb) {
2027 		init_data.comp_mode = QED_SPQ_MODE_CB;
2028 		init_data.p_comp_data = p_cb;
2029 	} else {
2030 		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2031 	}
2032 
2033 	rc = qed_sp_init_request(p_hwfn, &p_ent,
2034 				 ETH_RAMROD_GFT_UPDATE_FILTER,
2035 				 PROTOCOLID_ETH, &init_data);
2036 	if (rc)
2037 		return rc;
2038 
2039 	p_ramrod = &p_ent->ramrod.rx_update_gft;
2040 
2041 	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
2042 	p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
2043 
2044 	if (p_params->b_is_drop) {
2045 		p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
2046 	} else {
2047 		rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2048 		if (rc)
2049 			return rc;
2050 
2051 		if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2052 			rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
2053 					     &abs_rx_q_id);
2054 			if (rc)
2055 				return rc;
2056 
2057 			p_ramrod->rx_qid_valid = 1;
2058 			p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
2059 		}
2060 
2061 		p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
2062 	}
2063 
2064 	p_ramrod->flow_id_valid = 0;
2065 	p_ramrod->flow_id = 0;
2066 	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
2067 	    : GFT_DELETE_FILTER;
2068 
2069 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
2070 		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2071 		   abs_vport_id, abs_rx_q_id,
2072 		   p_params->b_is_add ? "Adding" : "Removing",
2073 		   (u64)p_params->addr, p_params->length);
2074 
2075 	return qed_spq_post(p_hwfn, p_ent, NULL);
2076 }
2077 
2078 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
2079 			 struct qed_ptt *p_ptt,
2080 			 struct qed_queue_cid *p_cid, u16 *p_rx_coal)
2081 {
2082 	u32 coalesce, address, is_valid;
2083 	struct cau_sb_entry sb_entry;
2084 	u8 timer_res;
2085 	int rc;
2086 
2087 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2088 			       p_cid->sb_igu_id * sizeof(u64),
2089 			       (u64)(uintptr_t)&sb_entry, 2, 0);
2090 	if (rc) {
2091 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2092 		return rc;
2093 	}
2094 
2095 	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
2096 
2097 	address = BAR0_MAP_REG_USDM_RAM +
2098 		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2099 	coalesce = qed_rd(p_hwfn, p_ptt, address);
2100 
2101 	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2102 	if (!is_valid)
2103 		return -EINVAL;
2104 
2105 	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2106 	*p_rx_coal = (u16)(coalesce << timer_res);
2107 
2108 	return 0;
2109 }
2110 
2111 int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
2112 			 struct qed_ptt *p_ptt,
2113 			 struct qed_queue_cid *p_cid, u16 *p_tx_coal)
2114 {
2115 	u32 coalesce, address, is_valid;
2116 	struct cau_sb_entry sb_entry;
2117 	u8 timer_res;
2118 	int rc;
2119 
2120 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2121 			       p_cid->sb_igu_id * sizeof(u64),
2122 			       (u64)(uintptr_t)&sb_entry, 2, 0);
2123 	if (rc) {
2124 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2125 		return rc;
2126 	}
2127 
2128 	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
2129 
2130 	address = BAR0_MAP_REG_XSDM_RAM +
2131 		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2132 	coalesce = qed_rd(p_hwfn, p_ptt, address);
2133 
2134 	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2135 	if (!is_valid)
2136 		return -EINVAL;
2137 
2138 	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2139 	*p_tx_coal = (u16)(coalesce << timer_res);
2140 
2141 	return 0;
2142 }
2143 
2144 int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
2145 {
2146 	struct qed_queue_cid *p_cid = handle;
2147 	struct qed_ptt *p_ptt;
2148 	int rc = 0;
2149 
2150 	if (IS_VF(p_hwfn->cdev)) {
2151 		rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2152 		if (rc)
2153 			DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2154 
2155 		return rc;
2156 	}
2157 
2158 	p_ptt = qed_ptt_acquire(p_hwfn);
2159 	if (!p_ptt)
2160 		return -EAGAIN;
2161 
2162 	if (p_cid->b_is_rx) {
2163 		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2164 		if (rc)
2165 			goto out;
2166 	} else {
2167 		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2168 		if (rc)
2169 			goto out;
2170 	}
2171 
2172 out:
2173 	qed_ptt_release(p_hwfn, p_ptt);
2174 
2175 	return rc;
2176 }
2177 
2178 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2179 				 struct qed_dev_eth_info *info)
2180 {
2181 	int i;
2182 
2183 	memset(info, 0, sizeof(*info));
2184 
2185 	info->num_tc = 1;
2186 
2187 	if (IS_PF(cdev)) {
2188 		int max_vf_vlan_filters = 0;
2189 		int max_vf_mac_filters = 0;
2190 
2191 		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
2192 			u16 num_queues = 0;
2193 
2194 			/* Since the feature controls only queue-zones,
2195 			 * make sure we have the contexts [rx, tx, xdp] to
2196 			 * match.
2197 			 */
2198 			for_each_hwfn(cdev, i) {
2199 				struct qed_hwfn *hwfn = &cdev->hwfns[i];
2200 				u16 l2_queues = (u16)FEAT_NUM(hwfn,
2201 							      QED_PF_L2_QUE);
2202 				u16 cids;
2203 
2204 				cids = hwfn->pf_params.eth_pf_params.num_cons;
2205 				num_queues += min_t(u16, l2_queues, cids / 3);
2206 			}
2207 
2208 			/* queues might theoretically be >256, but interrupts'
2209 			 * upper-limit guarantes that it would fit in a u8.
2210 			 */
2211 			if (cdev->int_params.fp_msix_cnt) {
2212 				u8 irqs = cdev->int_params.fp_msix_cnt;
2213 
2214 				info->num_queues = (u8)min_t(u16,
2215 							     num_queues, irqs);
2216 			}
2217 		} else {
2218 			info->num_queues = cdev->num_hwfns;
2219 		}
2220 
2221 		if (IS_QED_SRIOV(cdev)) {
2222 			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
2223 					      QED_ETH_VF_NUM_VLAN_FILTERS;
2224 			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
2225 					     QED_ETH_VF_NUM_MAC_FILTERS;
2226 		}
2227 		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2228 						  QED_VLAN) -
2229 					 max_vf_vlan_filters;
2230 		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2231 						 QED_MAC) -
2232 					max_vf_mac_filters;
2233 
2234 		ether_addr_copy(info->port_mac,
2235 				cdev->hwfns[0].hw_info.hw_mac_addr);
2236 
2237 		info->xdp_supported = true;
2238 	} else {
2239 		u16 total_cids = 0;
2240 
2241 		/* Determine queues &  XDP support */
2242 		for_each_hwfn(cdev, i) {
2243 			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2244 			u8 queues, cids;
2245 
2246 			qed_vf_get_num_cids(p_hwfn, &cids);
2247 			qed_vf_get_num_rxqs(p_hwfn, &queues);
2248 			info->num_queues += queues;
2249 			total_cids += cids;
2250 		}
2251 
2252 		/* Enable VF XDP in case PF guarntees sufficient connections */
2253 		if (total_cids >= info->num_queues * 3)
2254 			info->xdp_supported = true;
2255 
2256 		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
2257 					    (u8 *)&info->num_vlan_filters);
2258 		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
2259 					   (u8 *)&info->num_mac_filters);
2260 		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
2261 
2262 		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
2263 	}
2264 
2265 	qed_fill_dev_info(cdev, &info->common);
2266 
2267 	if (IS_VF(cdev))
2268 		eth_zero_addr(info->common.hw_mac);
2269 
2270 	return 0;
2271 }
2272 
2273 static void qed_register_eth_ops(struct qed_dev *cdev,
2274 				 struct qed_eth_cb_ops *ops, void *cookie)
2275 {
2276 	cdev->protocol_ops.eth = ops;
2277 	cdev->ops_cookie = cookie;
2278 
2279 	/* For VF, we start bulletin reading */
2280 	if (IS_VF(cdev))
2281 		qed_vf_start_iov_wq(cdev);
2282 }
2283 
2284 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
2285 {
2286 	if (IS_PF(cdev))
2287 		return true;
2288 
2289 	return qed_vf_check_mac(&cdev->hwfns[0], mac);
2290 }
2291 
2292 static int qed_start_vport(struct qed_dev *cdev,
2293 			   struct qed_start_vport_params *params)
2294 {
2295 	int rc, i;
2296 
2297 	for_each_hwfn(cdev, i) {
2298 		struct qed_sp_vport_start_params start = { 0 };
2299 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2300 
2301 		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
2302 							QED_TPA_MODE_NONE;
2303 		start.remove_inner_vlan = params->remove_inner_vlan;
2304 		start.only_untagged = true;	/* untagged only */
2305 		start.drop_ttl0 = params->drop_ttl0;
2306 		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
2307 		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
2308 		start.handle_ptp_pkts = params->handle_ptp_pkts;
2309 		start.vport_id = params->vport_id;
2310 		start.max_buffers_per_cqe = 16;
2311 		start.mtu = params->mtu;
2312 
2313 		rc = qed_sp_vport_start(p_hwfn, &start);
2314 		if (rc) {
2315 			DP_ERR(cdev, "Failed to start VPORT\n");
2316 			return rc;
2317 		}
2318 
2319 		rc = qed_hw_start_fastpath(p_hwfn);
2320 		if (rc) {
2321 			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
2322 			return rc;
2323 		}
2324 
2325 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2326 			   "Started V-PORT %d with MTU %d\n",
2327 			   start.vport_id, start.mtu);
2328 	}
2329 
2330 	if (params->clear_stats)
2331 		qed_reset_vport_stats(cdev);
2332 
2333 	return 0;
2334 }
2335 
2336 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
2337 {
2338 	int rc, i;
2339 
2340 	for_each_hwfn(cdev, i) {
2341 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2342 
2343 		rc = qed_sp_vport_stop(p_hwfn,
2344 				       p_hwfn->hw_info.opaque_fid, vport_id);
2345 
2346 		if (rc) {
2347 			DP_ERR(cdev, "Failed to stop VPORT\n");
2348 			return rc;
2349 		}
2350 	}
2351 	return 0;
2352 }
2353 
2354 static int qed_update_vport_rss(struct qed_dev *cdev,
2355 				struct qed_update_vport_rss_params *input,
2356 				struct qed_rss_params *rss)
2357 {
2358 	int i, fn;
2359 
2360 	/* Update configuration with what's correct regardless of CMT */
2361 	rss->update_rss_config = 1;
2362 	rss->rss_enable = 1;
2363 	rss->update_rss_capabilities = 1;
2364 	rss->update_rss_ind_table = 1;
2365 	rss->update_rss_key = 1;
2366 	rss->rss_caps = input->rss_caps;
2367 	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
2368 
2369 	/* In regular scenario, we'd simply need to take input handlers.
2370 	 * But in CMT, we'd have to split the handlers according to the
2371 	 * engine they were configured on. We'd then have to understand
2372 	 * whether RSS is really required, since 2-queues on CMT doesn't
2373 	 * require RSS.
2374 	 */
2375 	if (cdev->num_hwfns == 1) {
2376 		memcpy(rss->rss_ind_table,
2377 		       input->rss_ind_table,
2378 		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
2379 		rss->rss_table_size_log = 7;
2380 		return 0;
2381 	}
2382 
2383 	/* Start by copying the non-spcific information to the 2nd copy */
2384 	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
2385 
2386 	/* CMT should be round-robin */
2387 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
2388 		struct qed_queue_cid *cid = input->rss_ind_table[i];
2389 		struct qed_rss_params *t_rss;
2390 
2391 		if (cid->p_owner == QED_LEADING_HWFN(cdev))
2392 			t_rss = &rss[0];
2393 		else
2394 			t_rss = &rss[1];
2395 
2396 		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
2397 	}
2398 
2399 	/* Make sure RSS is actually required */
2400 	for_each_hwfn(cdev, fn) {
2401 		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
2402 			if (rss[fn].rss_ind_table[i] !=
2403 			    rss[fn].rss_ind_table[0])
2404 				break;
2405 		}
2406 		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
2407 			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
2408 				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
2409 			return -EINVAL;
2410 		}
2411 		rss[fn].rss_table_size_log = 6;
2412 	}
2413 
2414 	return 0;
2415 }
2416 
2417 static int qed_update_vport(struct qed_dev *cdev,
2418 			    struct qed_update_vport_params *params)
2419 {
2420 	struct qed_sp_vport_update_params sp_params;
2421 	struct qed_rss_params *rss;
2422 	int rc = 0, i;
2423 
2424 	if (!cdev)
2425 		return -ENODEV;
2426 
2427 	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
2428 	if (!rss)
2429 		return -ENOMEM;
2430 
2431 	memset(&sp_params, 0, sizeof(sp_params));
2432 
2433 	/* Translate protocol params into sp params */
2434 	sp_params.vport_id = params->vport_id;
2435 	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
2436 	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
2437 	sp_params.vport_active_rx_flg = params->vport_active_flg;
2438 	sp_params.vport_active_tx_flg = params->vport_active_flg;
2439 	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
2440 	sp_params.tx_switching_flg = params->tx_switching_flg;
2441 	sp_params.accept_any_vlan = params->accept_any_vlan;
2442 	sp_params.update_accept_any_vlan_flg =
2443 		params->update_accept_any_vlan_flg;
2444 
2445 	/* Prepare the RSS configuration */
2446 	if (params->update_rss_flg)
2447 		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
2448 			params->update_rss_flg = 0;
2449 
2450 	for_each_hwfn(cdev, i) {
2451 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2452 
2453 		if (params->update_rss_flg)
2454 			sp_params.rss_params = &rss[i];
2455 
2456 		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2457 		rc = qed_sp_vport_update(p_hwfn, &sp_params,
2458 					 QED_SPQ_MODE_EBLOCK,
2459 					 NULL);
2460 		if (rc) {
2461 			DP_ERR(cdev, "Failed to update VPORT\n");
2462 			goto out;
2463 		}
2464 
2465 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2466 			   "Updated V-PORT %d: active_flag %d [update %d]\n",
2467 			   params->vport_id, params->vport_active_flg,
2468 			   params->update_vport_active_flg);
2469 	}
2470 
2471 out:
2472 	vfree(rss);
2473 	return rc;
2474 }
2475 
2476 static int qed_start_rxq(struct qed_dev *cdev,
2477 			 u8 rss_num,
2478 			 struct qed_queue_start_common_params *p_params,
2479 			 u16 bd_max_bytes,
2480 			 dma_addr_t bd_chain_phys_addr,
2481 			 dma_addr_t cqe_pbl_addr,
2482 			 u16 cqe_pbl_size,
2483 			 struct qed_rxq_start_ret_params *ret_params)
2484 {
2485 	struct qed_hwfn *p_hwfn;
2486 	int rc, hwfn_index;
2487 
2488 	hwfn_index = rss_num % cdev->num_hwfns;
2489 	p_hwfn = &cdev->hwfns[hwfn_index];
2490 
2491 	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2492 	p_params->stats_id = p_params->vport_id;
2493 
2494 	rc = qed_eth_rx_queue_start(p_hwfn,
2495 				    p_hwfn->hw_info.opaque_fid,
2496 				    p_params,
2497 				    bd_max_bytes,
2498 				    bd_chain_phys_addr,
2499 				    cqe_pbl_addr, cqe_pbl_size, ret_params);
2500 	if (rc) {
2501 		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
2502 		return rc;
2503 	}
2504 
2505 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2506 		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2507 		   p_params->queue_id, rss_num, p_params->vport_id,
2508 		   p_params->p_sb->igu_sb_id);
2509 
2510 	return 0;
2511 }
2512 
2513 static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
2514 {
2515 	int rc, hwfn_index;
2516 	struct qed_hwfn *p_hwfn;
2517 
2518 	hwfn_index = rss_id % cdev->num_hwfns;
2519 	p_hwfn = &cdev->hwfns[hwfn_index];
2520 
2521 	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
2522 	if (rc) {
2523 		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
2524 		return rc;
2525 	}
2526 
2527 	return 0;
2528 }
2529 
2530 static int qed_start_txq(struct qed_dev *cdev,
2531 			 u8 rss_num,
2532 			 struct qed_queue_start_common_params *p_params,
2533 			 dma_addr_t pbl_addr,
2534 			 u16 pbl_size,
2535 			 struct qed_txq_start_ret_params *ret_params)
2536 {
2537 	struct qed_hwfn *p_hwfn;
2538 	int rc, hwfn_index;
2539 
2540 	hwfn_index = rss_num % cdev->num_hwfns;
2541 	p_hwfn = &cdev->hwfns[hwfn_index];
2542 	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2543 	p_params->stats_id = p_params->vport_id;
2544 
2545 	rc = qed_eth_tx_queue_start(p_hwfn,
2546 				    p_hwfn->hw_info.opaque_fid,
2547 				    p_params, 0,
2548 				    pbl_addr, pbl_size, ret_params);
2549 
2550 	if (rc) {
2551 		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
2552 		return rc;
2553 	}
2554 
2555 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2556 		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2557 		   p_params->queue_id, rss_num, p_params->vport_id,
2558 		   p_params->p_sb->igu_sb_id);
2559 
2560 	return 0;
2561 }
2562 
2563 #define QED_HW_STOP_RETRY_LIMIT (10)
2564 static int qed_fastpath_stop(struct qed_dev *cdev)
2565 {
2566 	int rc;
2567 
2568 	rc = qed_hw_stop_fastpath(cdev);
2569 	if (rc) {
2570 		DP_ERR(cdev, "Failed to stop Fastpath\n");
2571 		return rc;
2572 	}
2573 
2574 	return 0;
2575 }
2576 
2577 static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
2578 {
2579 	struct qed_hwfn *p_hwfn;
2580 	int rc, hwfn_index;
2581 
2582 	hwfn_index = rss_id % cdev->num_hwfns;
2583 	p_hwfn = &cdev->hwfns[hwfn_index];
2584 
2585 	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
2586 	if (rc) {
2587 		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
2588 		return rc;
2589 	}
2590 
2591 	return 0;
2592 }
2593 
2594 static int qed_tunn_configure(struct qed_dev *cdev,
2595 			      struct qed_tunn_params *tunn_params)
2596 {
2597 	struct qed_tunnel_info tunn_info;
2598 	int i, rc;
2599 
2600 	memset(&tunn_info, 0, sizeof(tunn_info));
2601 	if (tunn_params->update_vxlan_port) {
2602 		tunn_info.vxlan_port.b_update_port = true;
2603 		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
2604 	}
2605 
2606 	if (tunn_params->update_geneve_port) {
2607 		tunn_info.geneve_port.b_update_port = true;
2608 		tunn_info.geneve_port.port = tunn_params->geneve_port;
2609 	}
2610 
2611 	for_each_hwfn(cdev, i) {
2612 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
2613 		struct qed_ptt *p_ptt;
2614 		struct qed_tunnel_info *tun;
2615 
2616 		tun = &hwfn->cdev->tunnel;
2617 		if (IS_PF(cdev)) {
2618 			p_ptt = qed_ptt_acquire(hwfn);
2619 			if (!p_ptt)
2620 				return -EAGAIN;
2621 		} else {
2622 			p_ptt = NULL;
2623 		}
2624 
2625 		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
2626 					       QED_SPQ_MODE_EBLOCK, NULL);
2627 		if (rc) {
2628 			if (IS_PF(cdev))
2629 				qed_ptt_release(hwfn, p_ptt);
2630 			return rc;
2631 		}
2632 
2633 		if (IS_PF_SRIOV(hwfn)) {
2634 			u16 vxlan_port, geneve_port;
2635 			int j;
2636 
2637 			vxlan_port = tun->vxlan_port.port;
2638 			geneve_port = tun->geneve_port.port;
2639 
2640 			qed_for_each_vf(hwfn, j) {
2641 				qed_iov_bulletin_set_udp_ports(hwfn, j,
2642 							       vxlan_port,
2643 							       geneve_port);
2644 			}
2645 
2646 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2647 		}
2648 		if (IS_PF(cdev))
2649 			qed_ptt_release(hwfn, p_ptt);
2650 	}
2651 
2652 	return 0;
2653 }
2654 
2655 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2656 					enum qed_filter_rx_mode_type type)
2657 {
2658 	struct qed_filter_accept_flags accept_flags;
2659 
2660 	memset(&accept_flags, 0, sizeof(accept_flags));
2661 
2662 	accept_flags.update_rx_mode_config = 1;
2663 	accept_flags.update_tx_mode_config = 1;
2664 	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2665 					QED_ACCEPT_MCAST_MATCHED |
2666 					QED_ACCEPT_BCAST;
2667 	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2668 					QED_ACCEPT_MCAST_MATCHED |
2669 					QED_ACCEPT_BCAST;
2670 
2671 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2672 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2673 						 QED_ACCEPT_MCAST_UNMATCHED;
2674 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2675 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2676 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2677 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2678 	}
2679 
2680 	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
2681 				     QED_SPQ_MODE_CB, NULL);
2682 }
2683 
2684 static int qed_configure_filter_ucast(struct qed_dev *cdev,
2685 				      struct qed_filter_ucast_params *params)
2686 {
2687 	struct qed_filter_ucast ucast;
2688 
2689 	if (!params->vlan_valid && !params->mac_valid) {
2690 		DP_NOTICE(cdev,
2691 			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
2692 		return -EINVAL;
2693 	}
2694 
2695 	memset(&ucast, 0, sizeof(ucast));
2696 	switch (params->type) {
2697 	case QED_FILTER_XCAST_TYPE_ADD:
2698 		ucast.opcode = QED_FILTER_ADD;
2699 		break;
2700 	case QED_FILTER_XCAST_TYPE_DEL:
2701 		ucast.opcode = QED_FILTER_REMOVE;
2702 		break;
2703 	case QED_FILTER_XCAST_TYPE_REPLACE:
2704 		ucast.opcode = QED_FILTER_REPLACE;
2705 		break;
2706 	default:
2707 		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
2708 			  params->type);
2709 	}
2710 
2711 	if (params->vlan_valid && params->mac_valid) {
2712 		ucast.type = QED_FILTER_MAC_VLAN;
2713 		ether_addr_copy(ucast.mac, params->mac);
2714 		ucast.vlan = params->vlan;
2715 	} else if (params->mac_valid) {
2716 		ucast.type = QED_FILTER_MAC;
2717 		ether_addr_copy(ucast.mac, params->mac);
2718 	} else {
2719 		ucast.type = QED_FILTER_VLAN;
2720 		ucast.vlan = params->vlan;
2721 	}
2722 
2723 	ucast.is_rx_filter = true;
2724 	ucast.is_tx_filter = true;
2725 
2726 	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2727 }
2728 
2729 static int qed_configure_filter_mcast(struct qed_dev *cdev,
2730 				      struct qed_filter_mcast_params *params)
2731 {
2732 	struct qed_filter_mcast mcast;
2733 	int i;
2734 
2735 	memset(&mcast, 0, sizeof(mcast));
2736 	switch (params->type) {
2737 	case QED_FILTER_XCAST_TYPE_ADD:
2738 		mcast.opcode = QED_FILTER_ADD;
2739 		break;
2740 	case QED_FILTER_XCAST_TYPE_DEL:
2741 		mcast.opcode = QED_FILTER_REMOVE;
2742 		break;
2743 	default:
2744 		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
2745 			  params->type);
2746 	}
2747 
2748 	mcast.num_mc_addrs = params->num;
2749 	for (i = 0; i < mcast.num_mc_addrs; i++)
2750 		ether_addr_copy(mcast.mac[i], params->mac[i]);
2751 
2752 	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
2753 }
2754 
2755 static int qed_configure_filter(struct qed_dev *cdev,
2756 				struct qed_filter_params *params)
2757 {
2758 	enum qed_filter_rx_mode_type accept_flags;
2759 
2760 	switch (params->type) {
2761 	case QED_FILTER_TYPE_UCAST:
2762 		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2763 	case QED_FILTER_TYPE_MCAST:
2764 		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2765 	case QED_FILTER_TYPE_RX_MODE:
2766 		accept_flags = params->filter.accept_flags;
2767 		return qed_configure_filter_rx_mode(cdev, accept_flags);
2768 	default:
2769 		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
2770 		return -EINVAL;
2771 	}
2772 }
2773 
2774 static int qed_configure_arfs_searcher(struct qed_dev *cdev,
2775 				       enum qed_filter_config_mode mode)
2776 {
2777 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2778 	struct qed_arfs_config_params arfs_config_params;
2779 
2780 	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
2781 	arfs_config_params.tcp = true;
2782 	arfs_config_params.udp = true;
2783 	arfs_config_params.ipv4 = true;
2784 	arfs_config_params.ipv6 = true;
2785 	arfs_config_params.mode = mode;
2786 	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
2787 				&arfs_config_params);
2788 	return 0;
2789 }
2790 
2791 static void
2792 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
2793 			     void *cookie,
2794 			     union event_ring_data *data, u8 fw_return_code)
2795 {
2796 	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
2797 	void *dev = p_hwfn->cdev->ops_cookie;
2798 
2799 	op->arfs_filter_op(dev, cookie, fw_return_code);
2800 }
2801 
2802 static int
2803 qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
2804 			      void *cookie,
2805 			      struct qed_ntuple_filter_params *params)
2806 {
2807 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2808 	struct qed_spq_comp_cb cb;
2809 	int rc = -EINVAL;
2810 
2811 	cb.function = qed_arfs_sp_response_handler;
2812 	cb.cookie = cookie;
2813 
2814 	if (params->b_is_vf) {
2815 		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
2816 					   false)) {
2817 			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
2818 				params->vf_id);
2819 			return rc;
2820 		}
2821 
2822 		params->vport_id = params->vf_id + 1;
2823 		params->qid = QED_RFS_NTUPLE_QID_RSS;
2824 	}
2825 
2826 	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
2827 	if (rc)
2828 		DP_NOTICE(p_hwfn,
2829 			  "Failed to issue a-RFS filter configuration\n");
2830 	else
2831 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
2832 			   "Successfully issued a-RFS filter configuration\n");
2833 
2834 	return rc;
2835 }
2836 
2837 static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
2838 {
2839 	struct qed_queue_cid *p_cid = handle;
2840 	struct qed_hwfn *p_hwfn;
2841 	int rc;
2842 
2843 	p_hwfn = p_cid->p_owner;
2844 	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
2845 	if (rc)
2846 		DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2847 
2848 	return rc;
2849 }
2850 
2851 static int qed_fp_cqe_completion(struct qed_dev *dev,
2852 				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
2853 {
2854 	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2855 				      cqe);
2856 }
2857 
2858 static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
2859 {
2860 	int i, ret;
2861 
2862 	if (IS_PF(cdev))
2863 		return 0;
2864 
2865 	for_each_hwfn(cdev, i) {
2866 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2867 
2868 		ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
2869 		if (ret)
2870 			return ret;
2871 	}
2872 
2873 	return 0;
2874 }
2875 
2876 #ifdef CONFIG_QED_SRIOV
2877 extern const struct qed_iov_hv_ops qed_iov_ops_pass;
2878 #endif
2879 
2880 #ifdef CONFIG_DCB
2881 extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
2882 #endif
2883 
2884 extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
2885 
2886 static const struct qed_eth_ops qed_eth_ops_pass = {
2887 	.common = &qed_common_ops_pass,
2888 #ifdef CONFIG_QED_SRIOV
2889 	.iov = &qed_iov_ops_pass,
2890 #endif
2891 #ifdef CONFIG_DCB
2892 	.dcb = &qed_dcbnl_ops_pass,
2893 #endif
2894 	.ptp = &qed_ptp_ops_pass,
2895 	.fill_dev_info = &qed_fill_eth_dev_info,
2896 	.register_ops = &qed_register_eth_ops,
2897 	.check_mac = &qed_check_mac,
2898 	.vport_start = &qed_start_vport,
2899 	.vport_stop = &qed_stop_vport,
2900 	.vport_update = &qed_update_vport,
2901 	.q_rx_start = &qed_start_rxq,
2902 	.q_rx_stop = &qed_stop_rxq,
2903 	.q_tx_start = &qed_start_txq,
2904 	.q_tx_stop = &qed_stop_txq,
2905 	.filter_config = &qed_configure_filter,
2906 	.fastpath_stop = &qed_fastpath_stop,
2907 	.eth_cqe_completion = &qed_fp_cqe_completion,
2908 	.get_vport_stats = &qed_get_vport_stats,
2909 	.tunn_config = &qed_tunn_configure,
2910 	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
2911 	.configure_arfs_searcher = &qed_configure_arfs_searcher,
2912 	.get_coalesce = &qed_get_coalesce,
2913 	.req_bulletin_update_mac = &qed_req_bulletin_update_mac,
2914 };
2915 
2916 const struct qed_eth_ops *qed_get_eth_ops(void)
2917 {
2918 	return &qed_eth_ops_pass;
2919 }
2920 EXPORT_SYMBOL(qed_get_eth_ops);
2921 
2922 void qed_put_eth_ops(void)
2923 {
2924 	/* TODO - reference count for module? */
2925 }
2926 EXPORT_SYMBOL(qed_put_eth_ops);
2927