/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

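/* Per-hwfn L2 bookkeeping.
 *
 * A queue-zone can host more than one queue-cid, so usage inside each
 * zone is tracked by a small bitmap of MAX_QUEUES_PER_QZONE bits;
 * pp_qid_usage points to one such bitmap per queue-zone.
 */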
struct qed_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	struct mutex lock;
};

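/* Allocate the qid-usage bitmaps - one per queue-zone. For a PF the
 * number of zones comes from resource allocation; for a VF it is the
 * larger of the Rx/Tx queue counts the PF granted. Partial allocations
 * are released later by qed_l2_free().
 */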
int qed_l2_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!QED_IS_L2_PERSONALITY(p_hwfn))
		return 0;

	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
	if (!p_l2_info)
		return -ENOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->cdev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		qed_vf_get_num_rxqs(p_hwfn, &rx);
		qed_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = max_t(u8, rx, tx);
	}

	pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
			  GFP_KERNEL);
	if (!pp_qids)
		return -ENOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
		if (!pp_qids[i])
			return -ENOMEM;
	}

	return 0;
}

void qed_l2_setup(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	mutex_init(&p_hwfn->p_l2_info->lock);
}

void qed_l2_free(struct qed_hwfn *p_hwfn)
{
	u32 i;

	if (p_hwfn->hw_info.personality != QED_PCI_ETH &&
	    p_hwfn->hw_info.personality != QED_PCI_ETH_ROCE)
		return;

	if (!p_hwfn->p_l2_info)
		return;

	if (!p_hwfn->p_l2_info->pp_qid_usage)
		goto out_l2_info;

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
			break;
		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
	}

	kfree(p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
	kfree(p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = NULL;
}

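/* Reserve the first free qid inside the queue-zone's usage bitmap and
 * record it in the queue-cid. Returns false if the zone is out of range
 * or fully used.
 */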
static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	mutex_lock(&p_l2_info->lock);

	if (queue_id >= p_l2_info->queues) {
		DP_NOTICE(p_hwfn,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
					MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	mutex_unlock(&p_l2_info->lock);
	return b_rc;
}

static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
					struct qed_queue_cid *p_cid)
{
	mutex_lock(&p_hwfn->p_l2_info->lock);

	clear_bit(p_cid->qid_usage_idx,
		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	mutex_unlock(&p_hwfn->p_l2_info->lock);
}

void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
			       struct qed_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);

	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* For PF's VFs we maintain the index inside queue-zone in IOV */
	if (p_cid->vfid == QED_QUEUE_CID_SELF)
		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);

	vfree(p_cid);
}

/* This internal helper is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		      u16 opaque_fid,
		      u32 cid,
		      struct qed_queue_start_common_params *p_params,
		      bool b_is_rx,
		      struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = vzalloc(sizeof(*p_cid));
	if (!p_cid)
		return NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = QED_QUEUE_CID_SELF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->cdev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc)
		goto fail;

	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
	if (rc)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
				  &p_cid->abs.stats_id);
		if (rc)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid,
		   p_cid->cid,
		   p_cid->rel.vport_id,
		   p_cid->abs.vport_id,
		   p_cid->rel.queue_id,
		   p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id,
		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	vfree(p_cid);
	return NULL;
}

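/* Build a queue-cid for an Rx or Tx queue. For PFs this also acquires a
 * firmware CID from the context manager (unless the queue belongs to a
 * legacy VF, whose CID equals its vf_qid by convention); the CID is
 * released again if the queue-cid itself cannot be created.
 */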
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
		     u16 opaque_fid,
		     struct qed_queue_start_common_params *p_params,
		     bool b_is_rx,
		     struct qed_queue_cid_vf_params *p_vf_params)
{
	struct qed_queue_cid *p_cid;
	u8 vfid = QED_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by PF.
	 */
	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					 &cid, vfid)) {
			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
			return NULL;
		}
	}

	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
				      p_params, b_is_rx, p_vf_params);
	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
		_qed_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}

static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			bool b_is_rx,
			struct qed_queue_start_common_params *p_params)
{
	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				    NULL);
}

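/* Post the VPORT_START ramrod. The vport starts with unicast and
 * multicast drop-all; the actual Rx mode is applied later via a
 * vport-update. In GRO mode the TPA parameters are set so aggregation
 * starts/continues at half the configured MTU.
 */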
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
			   struct qed_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc = -EINVAL;
	u16 rx_mode = 0;

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod		= &p_ent->ramrod.vport_start;
	p_ramrod->vport_id	= abs_vport_id;

	p_ramrod->mtu			= cpu_to_le16(p_params->mtu);
	p_ramrod->handle_ptp_pkts	= p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en		= p_params->drop_ttl0;
	p_ramrod->untagged		= p_params->only_untagged;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);

	/* TPA related fields */
	memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));

	p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case QED_TPA_MODE_GRO:
		p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_ramrod->tpa_param.tpa_max_size = (u16)-1;
		p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
		p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
		p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
		p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
		p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
						  p_params->concrete_fid);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
			      struct qed_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->cdev)) {
		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					     p_params->mtu,
					     p_params->remove_inner_vlan,
					     p_params->tpa_mode,
					     p_params->max_buffers_per_cqe,
					     p_params->only_untagged);
	}

	return qed_sp_eth_vport_start(p_hwfn, p_params);
}

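/* Translate the driver's RSS parameters into the firmware's
 * eth_vport_rss_config. The indirection table entries must reference
 * queue-cids whose absolute queue ids are already known; the key is
 * copied as 10 dwords (40 bytes).
 */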
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	int rc = 0;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
			     ETH_VPORT_RSS_MODE_REGULAR :
			     ETH_VPORT_RSS_MODE_DISABLED;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;

	p_config->capabilities = cpu_to_le16(capabilities);

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
			   1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return -EINVAL;

		p_config->indirection_table[i] =
		    cpu_to_le16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   le16_to_cpu(p_config->indirection_table[i]),
			   le16_to_cpu(p_config->indirection_table[i + 1]),
			   le16_to_cpu(p_config->indirection_table[i + 2]),
			   le16_to_cpu(p_config->indirection_table[i + 3]),
			   le16_to_cpu(p_config->indirection_table[i + 4]),
			   le16_to_cpu(p_config->indirection_table[i + 5]),
			   le16_to_cpu(p_config->indirection_table[i + 6]),
			   le16_to_cpu(p_config->indirection_table[i + 7]),
			   le16_to_cpu(p_config->indirection_table[i + 8]),
			   le16_to_cpu(p_config->indirection_table[i + 9]),
			   le16_to_cpu(p_config->indirection_table[i + 10]),
			   le16_to_cpu(p_config->indirection_table[i + 11]),
			   le16_to_cpu(p_config->indirection_table[i + 12]),
			   le16_to_cpu(p_config->indirection_table[i + 13]),
			   le16_to_cpu(p_config->indirection_table[i + 14]),
			   le16_to_cpu(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);

	return rc;
}

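/* Translate accept-flags into the firmware's rx_mode/tx_mode state bits.
 * Note the drop-all bits are the inverse of the matched/unmatched accept
 * bits, and "accept all multicast" requires both matched and unmatched
 * to be set.
 */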
static void
qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct qed_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
		accept_flags.update_rx_mode_config;

	p_ramrod->common.update_tx_mode_flg =
		accept_flags.update_tx_mode_config;

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->rx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->rx_mode.state = 0x%x\n", state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & QED_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & QED_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = cpu_to_le16(state);
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "p_ramrod->tx_mode.state = 0x%x\n", state);
	}
}

static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct qed_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	p_tpa->tpa_max_size = p_params->tpa_max_size;
	p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

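/* Copy the approximate-multicast bin vector into the ramrod; the bins
 * are carried as ETH_MULTICAST_MAC_BINS_IN_REGS little-endian 32-bit
 * registers.
 */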
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
			struct vport_update_ramrod_data *p_ramrod,
			struct qed_sp_vport_update_params *p_params)
{
	int i;

	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = (u32 *)p_params->bins;

		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
	}
}

int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
			struct qed_sp_vport_update_params *p_params,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct qed_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	u8 abs_vport_id = 0, val;
	int rc = -EINVAL;

	if (IS_VF(p_hwfn->cdev)) {
		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;
	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc) {
		/* Return spq entry which is taken in qed_sp_init_request() */
		qed_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
	return qed_spq_post(p_hwfn, p_ent, NULL);
}

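/* Post the VPORT_STOP ramrod, or forward the request to the PF when
 * running as a VF.
 */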
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct qed_sp_init_data init_data;
	struct qed_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	int rc;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_pf_vport_stop(p_hwfn);

	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc)
		return rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
		       struct qed_filter_accept_flags *p_accept_flags)
{
	struct qed_sp_vport_update_params s_params;

	memset(&s_params, 0, sizeof(s_params));
	memcpy(&s_params.accept_flags, p_accept_flags,
	       sizeof(struct qed_filter_accept_flags));

	return qed_vf_pf_vport_update(p_hwfn, &s_params);
}

static int qed_filter_accept_cmd(struct qed_dev *cdev,
				 u8 vport,
				 struct qed_filter_accept_flags accept_flags,
				 u8 update_accept_any_vlan,
				 u8 accept_any_vlan,
				 enum spq_mode comp_mode,
				 struct qed_spq_comp_cb *p_comp_data)
{
	struct qed_sp_vport_update_params vport_update_params;
	int i, rc;

	/* Prepare and send the vport rx_mode change */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc)
				return rc;
			continue;
		}

		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
					 comp_mode, p_comp_data);
		if (rc) {
			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);
		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, QED_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return 0;
}

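/* Post the RX_QUEUE_START ramrod. The queue-cid must already carry the
 * absolute queue/vport/stats ids; the BD chain and CQE PBL addresses are
 * passed through to the firmware.
 */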
int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
			     struct qed_queue_cid *p_cid,
			     u16 bd_max_bytes,
			     dma_addr_t bd_chain_phys_addr,
			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      QED_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u16 bd_max_bytes,
			  dma_addr_t bd_chain_phys_addr,
			  dma_addr_t cqe_pbl_addr,
			  u16 cqe_pbl_size, void __iomem **pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
					bd_max_bytes,
					bd_chain_phys_addr,
					cqe_pbl_addr, cqe_pbl_size);
}

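/* Allocate a queue-cid and start the Rx queue, either directly (PF) or
 * through the PF/VF channel (VF). On success the opaque queue handle and
 * the producer address are returned to the caller; on failure the
 * queue-cid is released.
 */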
static int
qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u16 bd_max_bytes,
		       dma_addr_t bd_chain_phys_addr,
		       dma_addr_t cqe_pbl_addr,
		       u16 cqe_pbl_size,
		       struct qed_rxq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	/* Allocate a CID for the queue */
	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (!p_cid)
		return -ENOMEM;

	if (IS_PF(p_hwfn->cdev)) {
		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
					       bd_max_bytes,
					       bd_chain_phys_addr,
					       cqe_pbl_addr, cqe_pbl_size,
					       &p_ret_params->p_prod);
	} else {
		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
					 bd_max_bytes,
					 bd_chain_phys_addr,
					 cqe_pbl_addr,
					 cqe_pbl_size, &p_ret_params->p_prod);
	}

	/* Provide the caller with a reference to the queue handle */
	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
				void **pp_rxq_handles,
				u8 num_rxqs,
				u8 complete_cqe_flg,
				u8 complete_event_flg,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	struct qed_queue_cid *p_cid;
	int rc = -EINVAL;
	u8 i;

	memset(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = qed_sp_init_request(p_hwfn, &p_ent,
					 ETH_RAMROD_RX_QUEUE_UPDATE,
					 PROTOCOLID_ETH, &init_data);
		if (rc)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = qed_spq_post(p_hwfn, p_ent, NULL);
		if (rc)
			return rc;
	}

	return rc;
}

static int
qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 bool b_eq_completion_only, bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_RX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
				       b_eq_completion_only;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
			  void *p_rxq,
			  bool eq_completion_only, bool cqe_completion)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
	int rc = -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
					      eq_completion_only,
					      cqe_completion);
	else
		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

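/* Post the TX_QUEUE_START ramrod; the queue is bound to the physical
 * queue (PQ) selected by the caller, which determines its QM scheduling.
 */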
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
			 struct qed_queue_cid *p_cid,
			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc = -EINVAL;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_START,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int
qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
			  struct qed_queue_cid *p_cid,
			  u8 tc,
			  dma_addr_t pbl_addr,
			  u16 pbl_size, void __iomem **pp_doorbell)
{
	int rc;

	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
				      pbl_addr, pbl_size,
				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
	if (rc)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = p_hwfn->doorbells +
		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);

	return 0;
}

static int
qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
		       u16 opaque_fid,
		       struct qed_queue_start_common_params *p_params,
		       u8 tc,
		       dma_addr_t pbl_addr,
		       u16 pbl_size,
		       struct qed_txq_start_ret_params *p_ret_params)
{
	struct qed_queue_cid *p_cid;
	int rc;

	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (!p_cid)
		return -EINVAL;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
					       pbl_addr, pbl_size,
					       &p_ret_params->p_doorbell);
	else
		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
					 pbl_addr, pbl_size,
					 &p_ret_params->p_doorbell);

	if (rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}

static int
qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	int rc;

	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_TX_QUEUE_STOP,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
{
	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
	int rc;

	if (IS_PF(p_hwfn->cdev))
		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);

	if (!rc)
		qed_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}

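/* Map a driver filter opcode onto the firmware's filter action.
 * MAX_ETH_FILTER_ACTION is returned for opcodes that need special
 * handling (MOVE/REPLACE take a two-command path) or are unsupported.
 */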
static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case QED_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case QED_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case QED_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}

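/* Build the FILTERS_UPDATE ramrod common to all unicast filter ops.
 * MOVE and REPLACE consume two filter commands (a remove followed by an
 * add); every other opcode consumes one.
 */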
static int
qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_ucast *p_filter_cmd,
			struct vport_filter_update_ramrod_data **pp_ramrod,
			struct qed_spq_entry **pp_ent,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct qed_sp_init_data init_data;
	enum eth_filter_action action;
	int rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			  &vport_to_remove_from);
	if (rc)
		return rc;

	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			  &vport_to_add_to);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, pp_ent,
				 ETH_RAMROD_FILTERS_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

	switch (p_filter_cmd->opcode) {
	case QED_FILTER_REPLACE:
	case QED_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
	}

	p_first_filter	= &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case QED_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
	case QED_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
	case QED_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
	case QED_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
	case QED_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
	case QED_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
	case QED_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case QED_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
	case QED_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
				    &p_first_filter->mac_mid,
				    &p_first_filter->mac_lsb,
				    (u8 *)p_filter_cmd->mac);
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		memcpy(p_second_filter, p_first_filter,
		       sizeof(*p_second_filter));
		p_first_filter->action	= ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = qed_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return -EINVAL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id = (p_filter_cmd->opcode ==
					    QED_FILTER_REMOVE) ?
					   vport_to_remove_from :
					   vport_to_add_to;
	}

	return 0;
}

int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
			    u16 opaque_fid,
			    struct qed_filter_ucast *p_filter_cmd,
			    enum spq_mode comp_mode,
			    struct qed_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data	*p_ramrod	= NULL;
	struct qed_spq_entry			*p_ent		= NULL;
	struct eth_filter_cmd_header		*p_header;
	int					rc;

	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				     &p_ramrod, &p_ent,
				     comp_mode, p_comp_data);
	if (rc) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}
	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = qed_spq_post(p_hwfn, p_ent, NULL);
	if (rc) {
		DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
		   "REMOVE" :
		   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
		    "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter,
		   p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0],
		   p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2],
		   p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4],
		   p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return 0;
}

/*******************************************************************************
 * Description:
 *         Calculates CRC-32C over a buffer, bit by bit.
 *         Note: crc32_length must be a multiple of 8.
 * Return:
 *         The resulting CRC.
 ******************************************************************************/
static u32 qed_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((!crc32_packet) ||
	    (crc32_length == 0) ||
	    ((crc32_length % 8) != 0))
		return crc32_result;

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1; /*crc32_result[0] = 1;*/
			}
		}
	}

	return crc32_result;
}

static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
{
	u32 packet_buf[2] = { 0 };

	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
	return qed_calc_crc32c((u8 *)packet_buf, 8, seed);
}

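/* The approximate multicast bin is the low byte of the CRC-32C of the
 * 6-byte MAC (zero-padded to 8 bytes and seeded with
 * ETH_MULTICAST_BIN_FROM_MAC_SEED), so 256 bins are possible. As the
 * mapping is many-to-one, a set bin only indicates that *some* MAC
 * hashing to it was requested - hence "approximate" multicast.
 */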
u8 qed_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
				mac, ETH_ALEN);

	return crc & 0xff;
}

static int
qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
			u16 opaque_fid,
			struct qed_filter_mcast *p_filter_cmd,
			enum spq_mode comp_mode,
			struct qed_spq_comp_cb *p_comp_data)
{
	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct vport_update_ramrod_data *p_ramrod = NULL;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_sp_init_data init_data;
	u8 abs_vport_id = 0;
	int rc, i;

	if (p_filter_cmd->opcode == QED_FILTER_ADD)
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
				  &abs_vport_id);
	else
		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
				  &abs_vport_id);
	if (rc)
		return rc;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
	init_data.cid = qed_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 ETH_RAMROD_VPORT_UPDATE,
				 PROTOCOLID_ETH, &init_data);
	if (rc) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	memset(&p_ramrod->approx_mcast.bins, 0,
	       sizeof(p_ramrod->approx_mcast.bins));
	/* Track the bins as 32-bit words to match the ramrod's register
	 * layout regardless of the host's word size.
	 */
	memset(bins, 0, sizeof(bins));
	/* The filter ADD op is an explicit set operation; it replaces any
	 * existing filters for the vport.
	 */
	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit, nbits;

			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			nbits = sizeof(u32) * BITS_PER_BYTE;
			bins[bit / nbits] |= 1 << (bit % nbits);
		}

		/* Convert to correct endianness */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}

static int qed_filter_mcast_cmd(struct qed_dev *cdev,
				struct qed_filter_mcast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
		return -EINVAL;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_mcast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
	}
	return rc;
}

static int qed_filter_ucast_cmd(struct qed_dev *cdev,
				struct qed_filter_ucast *p_filter_cmd,
				enum spq_mode comp_mode,
				struct qed_spq_comp_cb *p_comp_data)
{
	int rc = 0;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(cdev)) {
			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;

		rc = qed_sp_eth_filter_ucast(p_hwfn,
					     opaque_fid,
					     p_filter_cmd,
					     comp_mode, p_comp_data);
		if (rc)
			break;
	}

	return rc;
}

/* Statistics related code */
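/* Statistics are harvested from per-STORM RAM: PSTORM holds Tx counters,
 * USTORM Rx counters, MSTORM Rx discard/TPA counters and TSTORM per-port
 * filter discards. PFs read them at fixed SDM offsets, while VFs use the
 * addresses the PF advertised in the acquire response. All counters are
 * accumulated into the caller's qed_eth_stats.
 */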
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}

static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
				       statistics_bin);

	memset(&pstats, 0, sizeof(pstats));
	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}

static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->cdev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	memset(&tstats, 0, sizeof(tstats));
	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}

static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
					   u32 *p_addr,
					   u32 *p_len, u16 statistics_bin)
{
	if (IS_PF(p_hwfn->cdev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}

static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct qed_eth_stats *p_stats,
				   u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
				       statistics_bin);

	memset(&ustats, 0, sizeof(ustats));
	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}

1726 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1727 					   u32 *p_addr,
1728 					   u32 *p_len, u16 statistics_bin)
1729 {
1730 	if (IS_PF(p_hwfn->cdev)) {
1731 		*p_addr = BAR0_MAP_REG_MSDM_RAM +
1732 		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1733 		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1734 	} else {
1735 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1736 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1737 
1738 		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1739 		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
1740 	}
1741 }
1742 
1743 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1744 				   struct qed_ptt *p_ptt,
1745 				   struct qed_eth_stats *p_stats,
1746 				   u16 statistics_bin)
1747 {
1748 	struct eth_mstorm_per_queue_stat mstats;
1749 	u32 mstats_addr = 0, mstats_len = 0;
1750 
1751 	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1752 				       statistics_bin);
1753 
1754 	memset(&mstats, 0, sizeof(mstats));
1755 	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1756 
1757 	p_stats->common.no_buff_discards +=
1758 	    HILO_64_REGPAIR(mstats.no_buff_discard);
1759 	p_stats->common.packet_too_big_discard +=
1760 	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
1761 	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1762 	p_stats->common.tpa_coalesced_pkts +=
1763 	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1764 	p_stats->common.tpa_coalesced_events +=
1765 	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1766 	p_stats->common.tpa_aborts_num +=
1767 	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
1768 	p_stats->common.tpa_coalesced_bytes +=
1769 	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1770 }
1771 
1772 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1773 				       struct qed_ptt *p_ptt,
1774 				       struct qed_eth_stats *p_stats)
1775 {
1776 	struct qed_eth_stats_common *p_common = &p_stats->common;
1777 	struct port_stats port_stats;
1778 	int j;
1779 
1780 	memset(&port_stats, 0, sizeof(port_stats));
1781 
1782 	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1783 			p_hwfn->mcp_info->port_addr +
1784 			offsetof(struct public_port, stats),
1785 			sizeof(port_stats));
1786 
1787 	p_common->rx_64_byte_packets += port_stats.eth.r64;
1788 	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1789 	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1790 	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1791 	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1792 	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1793 	p_common->rx_crc_errors += port_stats.eth.rfcs;
1794 	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1795 	p_common->rx_pause_frames += port_stats.eth.rxpf;
1796 	p_common->rx_pfc_frames += port_stats.eth.rxpp;
1797 	p_common->rx_align_errors += port_stats.eth.raln;
1798 	p_common->rx_carrier_errors += port_stats.eth.rfcr;
1799 	p_common->rx_oversize_packets += port_stats.eth.rovr;
1800 	p_common->rx_jabbers += port_stats.eth.rjbr;
1801 	p_common->rx_undersize_packets += port_stats.eth.rund;
1802 	p_common->rx_fragments += port_stats.eth.rfrg;
1803 	p_common->tx_64_byte_packets += port_stats.eth.t64;
1804 	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1805 	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1806 	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1807 	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1808 	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1809 	p_common->tx_pause_frames += port_stats.eth.txpf;
1810 	p_common->tx_pfc_frames += port_stats.eth.txpp;
1811 	p_common->rx_mac_bytes += port_stats.eth.rbyte;
1812 	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1813 	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1814 	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1815 	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1816 	p_common->tx_mac_bytes += port_stats.eth.tbyte;
1817 	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1818 	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1819 	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1820 	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1821 	for (j = 0; j < 8; j++) {
1822 		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1823 		p_common->brb_discards += port_stats.brb.brb_discard[j];
1824 	}
1825 
1826 	if (QED_IS_BB(p_hwfn->cdev)) {
1827 		struct qed_eth_stats_bb *p_bb = &p_stats->bb;
1828 
1829 		p_bb->rx_1519_to_1522_byte_packets +=
1830 		    port_stats.eth.u0.bb0.r1522;
1831 		p_bb->rx_1519_to_2047_byte_packets +=
1832 		    port_stats.eth.u0.bb0.r2047;
1833 		p_bb->rx_2048_to_4095_byte_packets +=
1834 		    port_stats.eth.u0.bb0.r4095;
1835 		p_bb->rx_4096_to_9216_byte_packets +=
1836 		    port_stats.eth.u0.bb0.r9216;
1837 		p_bb->rx_9217_to_16383_byte_packets +=
1838 		    port_stats.eth.u0.bb0.r16383;
1839 		p_bb->tx_1519_to_2047_byte_packets +=
1840 		    port_stats.eth.u1.bb1.t2047;
1841 		p_bb->tx_2048_to_4095_byte_packets +=
1842 		    port_stats.eth.u1.bb1.t4095;
1843 		p_bb->tx_4096_to_9216_byte_packets +=
1844 		    port_stats.eth.u1.bb1.t9216;
1845 		p_bb->tx_9217_to_16383_byte_packets +=
1846 		    port_stats.eth.u1.bb1.t16383;
1847 		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1848 		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1849 	} else {
1850 		struct qed_eth_stats_ah *p_ah = &p_stats->ah;
1851 
1852 		p_ah->rx_1519_to_max_byte_packets +=
1853 		    port_stats.eth.u0.ah0.r1519_to_max;
1854 		p_ah->tx_1519_to_max_byte_packets +=
1855 		    port_stats.eth.u1.ah1.t1519_to_max;
1856 	}
1857 }
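
/* The large-frame tail of the MAC block is ASIC-specific: BB exposes
 * per-range histograms up to 16383 bytes plus LPI-entry and collision
 * counters, while AH folds everything above 1518 bytes into a single
 * 1519-to-max bucket, hence the QED_IS_BB() split above.
 */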
1858 
1859 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1860 				  struct qed_ptt *p_ptt,
1861 				  struct qed_eth_stats *stats,
1862 				  u16 statistics_bin, bool b_get_port_stats)
1863 {
1864 	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1865 	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1866 	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1867 	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1868 
1869 	if (b_get_port_stats && p_hwfn->mcp_info)
1870 		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1871 }
1872 
1873 static void _qed_get_vport_stats(struct qed_dev *cdev,
1874 				 struct qed_eth_stats *stats)
1875 {
1876 	u8 fw_vport = 0;
1877 	int i;
1878 
1879 	memset(stats, 0, sizeof(*stats));
1880 
1881 	for_each_hwfn(cdev, i) {
1882 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1883 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1884 						    : NULL;
1885 
1886 		if (IS_PF(cdev)) {
1887 			/* Relative vport 0 is the main vport; convert to absolute */
1888 			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1889 				DP_ERR(p_hwfn, "No vport available!\n");
1890 				goto out;
1891 			}
1892 		}
1893 
1894 		if (IS_PF(cdev) && !p_ptt) {
1895 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1896 			continue;
1897 		}
1898 
1899 		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1900 				      IS_PF(cdev));
1901 
1902 out:
1903 		if (IS_PF(cdev) && p_ptt)
1904 			qed_ptt_release(p_hwfn, p_ptt);
1905 	}
1906 }
1907 
1908 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
1909 {
1910 	u32 i;
1911 
1912 	if (!cdev) {
1913 		memset(stats, 0, sizeof(*stats));
1914 		return;
1915 	}
1916 
1917 	_qed_get_vport_stats(cdev, stats);
1918 
1919 	if (!cdev->reset_stats)
1920 		return;
1921 
1922 	/* Reduce the statistics baseline */
1923 	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1924 		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1925 }
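
/* The baseline subtraction above treats struct qed_eth_stats as a flat
 * array of u64s; this is only sound while every field of the struct
 * (including the common/bb/ah members) is a u64.
 */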
1926 
1927 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
1928 void qed_reset_vport_stats(struct qed_dev *cdev)
1929 {
1930 	int i;
1931 
1932 	for_each_hwfn(cdev, i) {
1933 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1934 		struct eth_mstorm_per_queue_stat mstats;
1935 		struct eth_ustorm_per_queue_stat ustats;
1936 		struct eth_pstorm_per_queue_stat pstats;
1937 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1938 						    : NULL;
1939 		u32 addr = 0, len = 0;
1940 
1941 		if (IS_PF(cdev) && !p_ptt) {
1942 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1943 			continue;
1944 		}
1945 
1946 		memset(&mstats, 0, sizeof(mstats));
1947 		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1948 		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1949 
1950 		memset(&ustats, 0, sizeof(ustats));
1951 		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1952 		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1953 
1954 		memset(&pstats, 0, sizeof(pstats));
1955 		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1956 		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1957 
1958 		if (IS_PF(cdev))
1959 			qed_ptt_release(p_hwfn, p_ptt);
1960 	}
1961 
1962 	/* PORT statistics are not necessarily reset, so we need to
1963 	 * read and create a baseline for future statistics.
1964 	 */
1965 	if (!cdev->reset_stats)
1966 		DP_INFO(cdev, "Reset stats not allocated\n");
1967 	else
1968 		_qed_get_vport_stats(cdev, cdev->reset_stats);
1969 }
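
/* Only the m/u/p-storm shadows are zeroed back into storm RAM; the
 * per-port t-storm and MAC counters are left untouched, which is why
 * a software baseline is snapshotted into cdev->reset_stats instead.
 */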
1970 
1971 static enum gft_profile_type
1972 qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
1973 {
1974 	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
1975 		return GFT_PROFILE_TYPE_4_TUPLE;
1976 	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
1977 		return GFT_PROFILE_TYPE_IP_DST_PORT;
1978 	return GFT_PROFILE_TYPE_L4_DST_PORT;
1979 }
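
/* Note the naming asymmetry: the driver's 5-tuple mode maps onto the
 * firmware's GFT_PROFILE_TYPE_4_TUPLE profile, presumably because the
 * protocol is selected through the tcp/udp enables handed to
 * qed_gft_config() rather than matched as a fifth tuple element; any
 * other mode falls back to L4 destination-port matching.
 */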
1980 
1981 void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
1982 			     struct qed_ptt *p_ptt,
1983 			     struct qed_arfs_config_params *p_cfg_params)
1984 {
1985 	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
1986 		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1987 			       p_cfg_params->tcp,
1988 			       p_cfg_params->udp,
1989 			       p_cfg_params->ipv4,
1990 			       p_cfg_params->ipv6,
1991 			       qed_arfs_mode_to_hsi(p_cfg_params->mode));
1992 		DP_VERBOSE(p_hwfn,
1993 			   QED_MSG_SP,
1994 			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
1995 			   p_cfg_params->tcp ? "Enable" : "Disable",
1996 			   p_cfg_params->udp ? "Enable" : "Disable",
1997 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
1998 			   p_cfg_params->ipv6 ? "Enable" : "Disable",
1999 			   (u32)p_cfg_params->mode);
2000 	} else {
2001 		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
2002 		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2003 	}
2004 }
2005 
2006 int
2007 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2008 				struct qed_spq_comp_cb *p_cb,
2009 				struct qed_ntuple_filter_params *p_params)
2010 {
2011 	struct rx_update_gft_filter_data *p_ramrod = NULL;
2012 	struct qed_spq_entry *p_ent = NULL;
2013 	struct qed_sp_init_data init_data;
2014 	u16 abs_rx_q_id = 0;
2015 	u8 abs_vport_id = 0;
2016 	int rc = -EINVAL;
2017 
2018 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2019 	if (rc)
2020 		return rc;
2021 
2022 	if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2023 		rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
2024 		if (rc)
2025 			return rc;
2026 	}
2027 
2028 	/* Get SPQ entry */
2029 	memset(&init_data, 0, sizeof(init_data));
2030 	init_data.cid = qed_spq_get_cid(p_hwfn);
2031 
2032 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2033 
2034 	if (p_cb) {
2035 		init_data.comp_mode = QED_SPQ_MODE_CB;
2036 		init_data.p_comp_data = p_cb;
2037 	} else {
2038 		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2039 	}
2040 
2041 	rc = qed_sp_init_request(p_hwfn, &p_ent,
2042 				 ETH_RAMROD_GFT_UPDATE_FILTER,
2043 				 PROTOCOLID_ETH, &init_data);
2044 	if (rc)
2045 		return rc;
2046 
2047 	p_ramrod = &p_ent->ramrod.rx_update_gft;
2048 
2049 	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
2050 	p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
2051 
2052 	if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2053 		p_ramrod->rx_qid_valid = 1;
2054 		p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
2055 	}
2056 
2057 	p_ramrod->flow_id_valid = 0;
2058 	p_ramrod->flow_id = 0;
2059 
2060 	p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
2061 	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
2062 	    : GFT_DELETE_FILTER;
2063 
2064 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
2065 		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2066 		   abs_vport_id, abs_rx_q_id,
2067 		   p_params->b_is_add ? "Adding" : "Removing",
2068 		   (u64)p_params->addr, p_params->length);
2069 
2070 	return qed_spq_post(p_hwfn, p_ent, NULL);
2071 }
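
/* Illustrative caller sketch (hypothetical values, not a call site
 * from this file):
 *
 *	struct qed_ntuple_filter_params params = {};
 *
 *	params.addr = hdr_dma;       // DMA address of the header buffer
 *	params.length = hdr_len;     // header length in bytes
 *	params.vport_id = 0;         // relative vport
 *	params.qid = 3;              // steer matches to Rx queue 3
 *	params.b_is_add = true;
 *	rc = qed_configure_rfs_ntuple_filter(p_hwfn, NULL, &params);
 *
 * A NULL completion callback makes the ramrod complete in EBLOCK mode,
 * as handled above.
 */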
2072 
2073 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
2074 			 struct qed_ptt *p_ptt,
2075 			 struct qed_queue_cid *p_cid, u16 *p_rx_coal)
2076 {
2077 	u32 coalesce, address, is_valid;
2078 	struct cau_sb_entry sb_entry;
2079 	u8 timer_res;
2080 	int rc;
2081 
2082 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2083 			       p_cid->sb_igu_id * sizeof(u64),
2084 			       (u64)(uintptr_t)&sb_entry, 2, 0);
2085 	if (rc) {
2086 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2087 		return rc;
2088 	}
2089 
2090 	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
2091 
2092 	address = BAR0_MAP_REG_USDM_RAM +
2093 		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2094 	coalesce = qed_rd(p_hwfn, p_ptt, address);
2095 
2096 	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2097 	if (!is_valid)
2098 		return -EINVAL;
2099 
2100 	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2101 	*p_rx_coal = (u16)(coalesce << timer_res);
2102 
2103 	return 0;
2104 }
2105 
2106 int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
2107 			 struct qed_ptt *p_ptt,
2108 			 struct qed_queue_cid *p_cid, u16 *p_tx_coal)
2109 {
2110 	u32 coalesce, address, is_valid;
2111 	struct cau_sb_entry sb_entry;
2112 	u8 timer_res;
2113 	int rc;
2114 
2115 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2116 			       p_cid->sb_igu_id * sizeof(u64),
2117 			       (u64)(uintptr_t)&sb_entry, 2, 0);
2118 	if (rc) {
2119 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2120 		return rc;
2121 	}
2122 
2123 	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
2124 
2125 	address = BAR0_MAP_REG_XSDM_RAM +
2126 		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2127 	coalesce = qed_rd(p_hwfn, p_ptt, address);
2128 
2129 	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2130 	if (!is_valid)
2131 		return -EINVAL;
2132 
2133 	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2134 	*p_tx_coal = (u16)(coalesce << timer_res);
2135 
2136 	return 0;
2137 }
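
/* In both helpers the coalescing value is reconstructed as
 * timeset << timer_res, e.g. a stored timeset of 25 with timer_res 2
 * yields 25 << 2 = 100 in the underlying timer units. Rx and Tx differ
 * only in which CAU timer-resolution field (RES0 vs RES1) and which
 * storm RAM (USDM vs XSDM) they consult.
 */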
2138 
2139 int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
2140 {
2141 	struct qed_queue_cid *p_cid = handle;
2142 	struct qed_ptt *p_ptt;
2143 	int rc = 0;
2144 
2145 	if (IS_VF(p_hwfn->cdev)) {
2146 		rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2147 		if (rc)
2148 			DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2149 
2150 		return rc;
2151 	}
2152 
2153 	p_ptt = qed_ptt_acquire(p_hwfn);
2154 	if (!p_ptt)
2155 		return -EAGAIN;
2156 
2157 	if (p_cid->b_is_rx) {
2158 		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2159 		if (rc)
2160 			goto out;
2161 	} else {
2162 		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2163 		if (rc)
2164 			goto out;
2165 	}
2166 
2167 out:
2168 	qed_ptt_release(p_hwfn, p_ptt);
2169 
2170 	return rc;
2171 }
2172 
2173 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2174 				 struct qed_dev_eth_info *info)
2175 {
2176 	int i;
2177 
2178 	memset(info, 0, sizeof(*info));
2179 
2180 	info->num_tc = 1;
2181 
2182 	if (IS_PF(cdev)) {
2183 		int max_vf_vlan_filters = 0;
2184 		int max_vf_mac_filters = 0;
2185 
2186 		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
2187 			u16 num_queues = 0;
2188 
2189 			/* Since the feature controls only queue-zones,
2190 			 * make sure we have the contexts [rx, tx, xdp] to
2191 			 * match.
2192 			 */
2193 			for_each_hwfn(cdev, i) {
2194 				struct qed_hwfn *hwfn = &cdev->hwfns[i];
2195 				u16 l2_queues = (u16)FEAT_NUM(hwfn,
2196 							      QED_PF_L2_QUE);
2197 				u16 cids;
2198 
2199 				cids = hwfn->pf_params.eth_pf_params.num_cons;
2200 				num_queues += min_t(u16, l2_queues, cids / 3);
2201 			}
2202 
2203 			/* queues might theoretically be >256, but interrupts'
2204 			 * upper-limit guarantees that it would fit in a u8.
2205 			 */
2206 			if (cdev->int_params.fp_msix_cnt) {
2207 				u8 irqs = cdev->int_params.fp_msix_cnt;
2208 
2209 				info->num_queues = (u8)min_t(u16,
2210 							     num_queues, irqs);
2211 			}
2212 		} else {
2213 			info->num_queues = cdev->num_hwfns;
2214 		}
2215 
2216 		if (IS_QED_SRIOV(cdev)) {
2217 			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
2218 					      QED_ETH_VF_NUM_VLAN_FILTERS;
2219 			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
2220 					     QED_ETH_VF_NUM_MAC_FILTERS;
2221 		}
2222 		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2223 						  QED_VLAN) -
2224 					 max_vf_vlan_filters;
2225 		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2226 						 QED_MAC) -
2227 					max_vf_mac_filters;
2228 
2229 		ether_addr_copy(info->port_mac,
2230 				cdev->hwfns[0].hw_info.hw_mac_addr);
2231 
2232 		info->xdp_supported = true;
2233 	} else {
2234 		u16 total_cids = 0;
2235 
2236 		/* Determine queues & XDP support */
2237 		for_each_hwfn(cdev, i) {
2238 			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2239 			u8 queues, cids;
2240 
2241 			qed_vf_get_num_cids(p_hwfn, &cids);
2242 			qed_vf_get_num_rxqs(p_hwfn, &queues);
2243 			info->num_queues += queues;
2244 			total_cids += cids;
2245 		}
2246 
2247 		/* Enable VF XDP in case the PF guarantees sufficient connections */
2248 		if (total_cids >= info->num_queues * 3)
2249 			info->xdp_supported = true;
2250 
2251 		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
2252 					    (u8 *)&info->num_vlan_filters);
2253 		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
2254 					   (u8 *)&info->num_mac_filters);
2255 		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
2256 
2257 		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
2258 	}
2259 
2260 	qed_fill_dev_info(cdev, &info->common);
2261 
2262 	if (IS_VF(cdev))
2263 		eth_zero_addr(info->common.hw_mac);
2264 
2265 	return 0;
2266 }
2267 
2268 static void qed_register_eth_ops(struct qed_dev *cdev,
2269 				 struct qed_eth_cb_ops *ops, void *cookie)
2270 {
2271 	cdev->protocol_ops.eth = ops;
2272 	cdev->ops_cookie = cookie;
2273 
2274 	/* For VFs, start reading the bulletin board */
2275 	if (IS_VF(cdev))
2276 		qed_vf_start_iov_wq(cdev);
2277 }
2278 
2279 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
2280 {
2281 	if (IS_PF(cdev))
2282 		return true;
2283 
2284 	return qed_vf_check_mac(&cdev->hwfns[0], mac);
2285 }
2286 
2287 static int qed_start_vport(struct qed_dev *cdev,
2288 			   struct qed_start_vport_params *params)
2289 {
2290 	int rc, i;
2291 
2292 	for_each_hwfn(cdev, i) {
2293 		struct qed_sp_vport_start_params start = { 0 };
2294 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2295 
2296 		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
2297 							QED_TPA_MODE_NONE;
2298 		start.remove_inner_vlan = params->remove_inner_vlan;
2299 		start.only_untagged = true;	/* untagged only */
2300 		start.drop_ttl0 = params->drop_ttl0;
2301 		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
2302 		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
2303 		start.handle_ptp_pkts = params->handle_ptp_pkts;
2304 		start.vport_id = params->vport_id;
2305 		start.max_buffers_per_cqe = 16;
2306 		start.mtu = params->mtu;
2307 
2308 		rc = qed_sp_vport_start(p_hwfn, &start);
2309 		if (rc) {
2310 			DP_ERR(cdev, "Failed to start VPORT\n");
2311 			return rc;
2312 		}
2313 
2314 		rc = qed_hw_start_fastpath(p_hwfn);
2315 		if (rc) {
2316 			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
2317 			return rc;
2318 		}
2319 
2320 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2321 			   "Started V-PORT %d with MTU %d\n",
2322 			   start.vport_id, start.mtu);
2323 	}
2324 
2325 	if (params->clear_stats)
2326 		qed_reset_vport_stats(cdev);
2327 
2328 	return 0;
2329 }
2330 
2331 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
2332 {
2333 	int rc, i;
2334 
2335 	for_each_hwfn(cdev, i) {
2336 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2337 
2338 		rc = qed_sp_vport_stop(p_hwfn,
2339 				       p_hwfn->hw_info.opaque_fid, vport_id);
2340 
2341 		if (rc) {
2342 			DP_ERR(cdev, "Failed to stop VPORT\n");
2343 			return rc;
2344 		}
2345 	}
2346 	return 0;
2347 }
2348 
2349 static int qed_update_vport_rss(struct qed_dev *cdev,
2350 				struct qed_update_vport_rss_params *input,
2351 				struct qed_rss_params *rss)
2352 {
2353 	int i, fn;
2354 
2355 	/* Update configuration with what's correct regardless of CMT */
2356 	rss->update_rss_config = 1;
2357 	rss->rss_enable = 1;
2358 	rss->update_rss_capabilities = 1;
2359 	rss->update_rss_ind_table = 1;
2360 	rss->update_rss_key = 1;
2361 	rss->rss_caps = input->rss_caps;
2362 	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
2363 
2364 	/* In a regular scenario, we'd simply need to take the input handlers.
2365 	 * But in CMT, we'd have to split the handlers according to the
2366 	 * engine they were configured on. We'd then have to understand
2367 	 * whether RSS is really required, since 2-queues on CMT doesn't
2368 	 * require RSS.
2369 	 */
2370 	if (cdev->num_hwfns == 1) {
2371 		memcpy(rss->rss_ind_table,
2372 		       input->rss_ind_table,
2373 		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
2374 		rss->rss_table_size_log = 7;
2375 		return 0;
2376 	}
2377 
2378 	/* Start by copying the non-specific information to the 2nd copy */
2379 	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
2380 
2381 	/* In CMT, queues alternate between engines; split accordingly */
2382 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
2383 		struct qed_queue_cid *cid = input->rss_ind_table[i];
2384 		struct qed_rss_params *t_rss;
2385 
2386 		if (cid->p_owner == QED_LEADING_HWFN(cdev))
2387 			t_rss = &rss[0];
2388 		else
2389 			t_rss = &rss[1];
2390 
2391 		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
2392 	}
2393 
2394 	/* Make sure RSS is actually required */
2395 	for_each_hwfn(cdev, fn) {
2396 		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
2397 			if (rss[fn].rss_ind_table[i] !=
2398 			    rss[fn].rss_ind_table[0])
2399 				break;
2400 		}
2401 		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
2402 			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
2403 				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
2404 			return -EINVAL;
2405 		}
2406 		rss[fn].rss_table_size_log = 6;
2407 	}
2408 
2409 	return 0;
2410 }
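
/* Worked example of the CMT split with num_hwfns == 2: queue cids owned
 * by the leading hwfn stay in rss[0] and the rest go to rss[1], with
 * entry i stored at slot i / 2 of the owning copy, so each engine sees
 * a half-sized table (rss_table_size_log 6 instead of 7).
 */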
2411 
2412 static int qed_update_vport(struct qed_dev *cdev,
2413 			    struct qed_update_vport_params *params)
2414 {
2415 	struct qed_sp_vport_update_params sp_params;
2416 	struct qed_rss_params *rss;
2417 	int rc = 0, i;
2418 
2419 	if (!cdev)
2420 		return -ENODEV;
2421 
2422 	rss = vzalloc(sizeof(*rss) * cdev->num_hwfns);
2423 	if (!rss)
2424 		return -ENOMEM;
2425 
2426 	memset(&sp_params, 0, sizeof(sp_params));
2427 
2428 	/* Translate protocol params into sp params */
2429 	sp_params.vport_id = params->vport_id;
2430 	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
2431 	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
2432 	sp_params.vport_active_rx_flg = params->vport_active_flg;
2433 	sp_params.vport_active_tx_flg = params->vport_active_flg;
2434 	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
2435 	sp_params.tx_switching_flg = params->tx_switching_flg;
2436 	sp_params.accept_any_vlan = params->accept_any_vlan;
2437 	sp_params.update_accept_any_vlan_flg =
2438 		params->update_accept_any_vlan_flg;
2439 
2440 	/* Prepare the RSS configuration */
2441 	if (params->update_rss_flg)
2442 		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
2443 			params->update_rss_flg = 0;
2444 
2445 	for_each_hwfn(cdev, i) {
2446 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2447 
2448 		if (params->update_rss_flg)
2449 			sp_params.rss_params = &rss[i];
2450 
2451 		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2452 		rc = qed_sp_vport_update(p_hwfn, &sp_params,
2453 					 QED_SPQ_MODE_EBLOCK,
2454 					 NULL);
2455 		if (rc) {
2456 			DP_ERR(cdev, "Failed to update VPORT\n");
2457 			goto out;
2458 		}
2459 
2460 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2461 			   "Updated V-PORT %d: active_flag %d [update %d]\n",
2462 			   params->vport_id, params->vport_active_flg,
2463 			   params->update_vport_active_flg);
2464 	}
2465 
2466 out:
2467 	vfree(rss);
2468 	return rc;
2469 }
2470 
2471 static int qed_start_rxq(struct qed_dev *cdev,
2472 			 u8 rss_num,
2473 			 struct qed_queue_start_common_params *p_params,
2474 			 u16 bd_max_bytes,
2475 			 dma_addr_t bd_chain_phys_addr,
2476 			 dma_addr_t cqe_pbl_addr,
2477 			 u16 cqe_pbl_size,
2478 			 struct qed_rxq_start_ret_params *ret_params)
2479 {
2480 	struct qed_hwfn *p_hwfn;
2481 	int rc, hwfn_index;
2482 
2483 	hwfn_index = rss_num % cdev->num_hwfns;
2484 	p_hwfn = &cdev->hwfns[hwfn_index];
2485 
2486 	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2487 	p_params->stats_id = p_params->vport_id;
2488 
2489 	rc = qed_eth_rx_queue_start(p_hwfn,
2490 				    p_hwfn->hw_info.opaque_fid,
2491 				    p_params,
2492 				    bd_max_bytes,
2493 				    bd_chain_phys_addr,
2494 				    cqe_pbl_addr, cqe_pbl_size, ret_params);
2495 	if (rc) {
2496 		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
2497 		return rc;
2498 	}
2499 
2500 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2501 		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2502 		   p_params->queue_id, rss_num, p_params->vport_id,
2503 		   p_params->p_sb->igu_sb_id);
2504 
2505 	return 0;
2506 }
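
/* Queues are spread round-robin across engines: rss_num picks the hwfn
 * (rss_num % num_hwfns) and the global queue_id is divided by
 * num_hwfns to get the engine-relative index used from here on.
 */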
2507 
2508 static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
2509 {
2510 	int rc, hwfn_index;
2511 	struct qed_hwfn *p_hwfn;
2512 
2513 	hwfn_index = rss_id % cdev->num_hwfns;
2514 	p_hwfn = &cdev->hwfns[hwfn_index];
2515 
2516 	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
2517 	if (rc) {
2518 		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
2519 		return rc;
2520 	}
2521 
2522 	return 0;
2523 }
2524 
2525 static int qed_start_txq(struct qed_dev *cdev,
2526 			 u8 rss_num,
2527 			 struct qed_queue_start_common_params *p_params,
2528 			 dma_addr_t pbl_addr,
2529 			 u16 pbl_size,
2530 			 struct qed_txq_start_ret_params *ret_params)
2531 {
2532 	struct qed_hwfn *p_hwfn;
2533 	int rc, hwfn_index;
2534 
2535 	hwfn_index = rss_num % cdev->num_hwfns;
2536 	p_hwfn = &cdev->hwfns[hwfn_index];
2537 	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2538 	p_params->stats_id = p_params->vport_id;
2539 
2540 	rc = qed_eth_tx_queue_start(p_hwfn,
2541 				    p_hwfn->hw_info.opaque_fid,
2542 				    p_params, 0,
2543 				    pbl_addr, pbl_size, ret_params);
2544 
2545 	if (rc) {
2546 		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
2547 		return rc;
2548 	}
2549 
2550 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2551 		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2552 		   p_params->queue_id, rss_num, p_params->vport_id,
2553 		   p_params->p_sb->igu_sb_id);
2554 
2555 	return 0;
2556 }
2557 
2558 #define QED_HW_STOP_RETRY_LIMIT (10)
2559 static int qed_fastpath_stop(struct qed_dev *cdev)
2560 {
2561 	int rc;
2562 
2563 	rc = qed_hw_stop_fastpath(cdev);
2564 	if (rc) {
2565 		DP_ERR(cdev, "Failed to stop Fastpath\n");
2566 		return rc;
2567 	}
2568 
2569 	return 0;
2570 }
2571 
2572 static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
2573 {
2574 	struct qed_hwfn *p_hwfn;
2575 	int rc, hwfn_index;
2576 
2577 	hwfn_index = rss_id % cdev->num_hwfns;
2578 	p_hwfn = &cdev->hwfns[hwfn_index];
2579 
2580 	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
2581 	if (rc) {
2582 		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
2583 		return rc;
2584 	}
2585 
2586 	return 0;
2587 }
2588 
2589 static int qed_tunn_configure(struct qed_dev *cdev,
2590 			      struct qed_tunn_params *tunn_params)
2591 {
2592 	struct qed_tunnel_info tunn_info;
2593 	int i, rc;
2594 
2595 	memset(&tunn_info, 0, sizeof(tunn_info));
2596 	if (tunn_params->update_vxlan_port) {
2597 		tunn_info.vxlan_port.b_update_port = true;
2598 		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
2599 	}
2600 
2601 	if (tunn_params->update_geneve_port) {
2602 		tunn_info.geneve_port.b_update_port = true;
2603 		tunn_info.geneve_port.port = tunn_params->geneve_port;
2604 	}
2605 
2606 	for_each_hwfn(cdev, i) {
2607 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
2608 		struct qed_ptt *p_ptt;
2609 		struct qed_tunnel_info *tun;
2610 
2611 		tun = &hwfn->cdev->tunnel;
2612 		if (IS_PF(cdev)) {
2613 			p_ptt = qed_ptt_acquire(hwfn);
2614 			if (!p_ptt)
2615 				return -EAGAIN;
2616 		} else {
2617 			p_ptt = NULL;
2618 		}
2619 
2620 		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
2621 					       QED_SPQ_MODE_EBLOCK, NULL);
2622 		if (rc) {
2623 			if (IS_PF(cdev))
2624 				qed_ptt_release(hwfn, p_ptt);
2625 			return rc;
2626 		}
2627 
2628 		if (IS_PF_SRIOV(hwfn)) {
2629 			u16 vxlan_port, geneve_port;
2630 			int j;
2631 
2632 			vxlan_port = tun->vxlan_port.port;
2633 			geneve_port = tun->geneve_port.port;
2634 
2635 			qed_for_each_vf(hwfn, j) {
2636 				qed_iov_bulletin_set_udp_ports(hwfn, j,
2637 							       vxlan_port,
2638 							       geneve_port);
2639 			}
2640 
2641 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2642 		}
2643 		if (IS_PF(cdev))
2644 			qed_ptt_release(hwfn, p_ptt);
2645 	}
2646 
2647 	return 0;
2648 }
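
/* With SR-IOV active, the updated VXLAN/GENEVE UDP ports are also
 * published to each VF via its bulletin board, and the IOV workqueue is
 * scheduled so the bulletin updates actually go out.
 */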
2649 
2650 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2651 					enum qed_filter_rx_mode_type type)
2652 {
2653 	struct qed_filter_accept_flags accept_flags;
2654 
2655 	memset(&accept_flags, 0, sizeof(accept_flags));
2656 
2657 	accept_flags.update_rx_mode_config = 1;
2658 	accept_flags.update_tx_mode_config = 1;
2659 	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2660 					QED_ACCEPT_MCAST_MATCHED |
2661 					QED_ACCEPT_BCAST;
2662 	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2663 					QED_ACCEPT_MCAST_MATCHED |
2664 					QED_ACCEPT_BCAST;
2665 
2666 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2667 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2668 						 QED_ACCEPT_MCAST_UNMATCHED;
2669 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2670 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2671 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2672 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2673 	}
2674 
2675 	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
2676 				     QED_SPQ_MODE_CB, NULL);
2677 }
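
/* Baseline acceptance is matched unicast + matched multicast +
 * broadcast in both directions. Full promiscuous additionally accepts
 * unmatched unicast and multicast on Rx and unmatched multicast on Tx;
 * multicast-promiscuous widens only the multicast acceptance.
 */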
2678 
2679 static int qed_configure_filter_ucast(struct qed_dev *cdev,
2680 				      struct qed_filter_ucast_params *params)
2681 {
2682 	struct qed_filter_ucast ucast;
2683 
2684 	if (!params->vlan_valid && !params->mac_valid) {
2685 		DP_NOTICE(cdev,
2686 			  "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
2687 		return -EINVAL;
2688 	}
2689 
2690 	memset(&ucast, 0, sizeof(ucast));
2691 	switch (params->type) {
2692 	case QED_FILTER_XCAST_TYPE_ADD:
2693 		ucast.opcode = QED_FILTER_ADD;
2694 		break;
2695 	case QED_FILTER_XCAST_TYPE_DEL:
2696 		ucast.opcode = QED_FILTER_REMOVE;
2697 		break;
2698 	case QED_FILTER_XCAST_TYPE_REPLACE:
2699 		ucast.opcode = QED_FILTER_REPLACE;
2700 		break;
2701 	default:
2702 		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
2703 			  params->type);
2704 	}
2705 
2706 	if (params->vlan_valid && params->mac_valid) {
2707 		ucast.type = QED_FILTER_MAC_VLAN;
2708 		ether_addr_copy(ucast.mac, params->mac);
2709 		ucast.vlan = params->vlan;
2710 	} else if (params->mac_valid) {
2711 		ucast.type = QED_FILTER_MAC;
2712 		ether_addr_copy(ucast.mac, params->mac);
2713 	} else {
2714 		ucast.type = QED_FILTER_VLAN;
2715 		ucast.vlan = params->vlan;
2716 	}
2717 
2718 	ucast.is_rx_filter = true;
2719 	ucast.is_tx_filter = true;
2720 
2721 	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2722 }
2723 
2724 static int qed_configure_filter_mcast(struct qed_dev *cdev,
2725 				      struct qed_filter_mcast_params *params)
2726 {
2727 	struct qed_filter_mcast mcast;
2728 	int i;
2729 
2730 	memset(&mcast, 0, sizeof(mcast));
2731 	switch (params->type) {
2732 	case QED_FILTER_XCAST_TYPE_ADD:
2733 		mcast.opcode = QED_FILTER_ADD;
2734 		break;
2735 	case QED_FILTER_XCAST_TYPE_DEL:
2736 		mcast.opcode = QED_FILTER_REMOVE;
2737 		break;
2738 	default:
2739 		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
2740 			  params->type);
2741 	}
2742 
2743 	mcast.num_mc_addrs = params->num;
2744 	for (i = 0; i < mcast.num_mc_addrs; i++)
2745 		ether_addr_copy(mcast.mac[i], params->mac[i]);
2746 
2747 	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
2748 }
2749 
2750 static int qed_configure_filter(struct qed_dev *cdev,
2751 				struct qed_filter_params *params)
2752 {
2753 	enum qed_filter_rx_mode_type accept_flags;
2754 
2755 	switch (params->type) {
2756 	case QED_FILTER_TYPE_UCAST:
2757 		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2758 	case QED_FILTER_TYPE_MCAST:
2759 		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2760 	case QED_FILTER_TYPE_RX_MODE:
2761 		accept_flags = params->filter.accept_flags;
2762 		return qed_configure_filter_rx_mode(cdev, accept_flags);
2763 	default:
2764 		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
2765 		return -EINVAL;
2766 	}
2767 }
2768 
2769 static int qed_configure_arfs_searcher(struct qed_dev *cdev,
2770 				       enum qed_filter_config_mode mode)
2771 {
2772 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2773 	struct qed_arfs_config_params arfs_config_params;
2774 
2775 	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
2776 	arfs_config_params.tcp = true;
2777 	arfs_config_params.udp = true;
2778 	arfs_config_params.ipv4 = true;
2779 	arfs_config_params.ipv6 = true;
2780 	arfs_config_params.mode = mode;
2781 	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
2782 				&arfs_config_params);
2783 	return 0;
2784 }
2785 
2786 static void
2787 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
2788 			     void *cookie,
2789 			     union event_ring_data *data, u8 fw_return_code)
2790 {
2791 	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
2792 	void *dev = p_hwfn->cdev->ops_cookie;
2793 
2794 	op->arfs_filter_op(dev, cookie, fw_return_code);
2795 }
2796 
2797 static int
2798 qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
2799 			      void *cookie,
2800 			      struct qed_ntuple_filter_params *params)
2801 {
2802 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2803 	struct qed_spq_comp_cb cb;
2804 	int rc = -EINVAL;
2805 
2806 	cb.function = qed_arfs_sp_response_handler;
2807 	cb.cookie = cookie;
2808 
2809 	if (params->b_is_vf) {
2810 		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
2811 					   false)) {
2812 			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
2813 				params->vf_id);
2814 			return rc;
2815 		}
2816 
2817 		params->vport_id = params->vf_id + 1;
2818 		params->qid = QED_RFS_NTUPLE_QID_RSS;
2819 	}
2820 
2821 	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
2822 	if (rc)
2823 		DP_NOTICE(p_hwfn,
2824 			  "Failed to issue a-RFS filter configuration\n");
2825 	else
2826 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
2827 			   "Successfully issued a-RFS filter configuration\n");
2828 
2829 	return rc;
2830 }
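
/* For VF-owned filters this relies on the driver's convention that
 * VF N is served by relative vport N + 1, and it forces
 * QED_RFS_NTUPLE_QID_RSS so the VF's own RSS configuration selects the
 * final Rx queue.
 */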
2831 
2832 static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
2833 {
2834 	struct qed_queue_cid *p_cid = handle;
2835 	struct qed_hwfn *p_hwfn;
2836 	int rc;
2837 
2838 	p_hwfn = p_cid->p_owner;
2839 	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
2840 	if (rc)
2841 		DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2842 
2843 	return rc;
2844 }
2845 
2846 static int qed_fp_cqe_completion(struct qed_dev *dev,
2847 				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
2848 {
2849 	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2850 				      cqe);
2851 }
2852 
2853 #ifdef CONFIG_QED_SRIOV
2854 extern const struct qed_iov_hv_ops qed_iov_ops_pass;
2855 #endif
2856 
2857 #ifdef CONFIG_DCB
2858 extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
2859 #endif
2860 
2861 extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
2862 
2863 static const struct qed_eth_ops qed_eth_ops_pass = {
2864 	.common = &qed_common_ops_pass,
2865 #ifdef CONFIG_QED_SRIOV
2866 	.iov = &qed_iov_ops_pass,
2867 #endif
2868 #ifdef CONFIG_DCB
2869 	.dcb = &qed_dcbnl_ops_pass,
2870 #endif
2871 	.ptp = &qed_ptp_ops_pass,
2872 	.fill_dev_info = &qed_fill_eth_dev_info,
2873 	.register_ops = &qed_register_eth_ops,
2874 	.check_mac = &qed_check_mac,
2875 	.vport_start = &qed_start_vport,
2876 	.vport_stop = &qed_stop_vport,
2877 	.vport_update = &qed_update_vport,
2878 	.q_rx_start = &qed_start_rxq,
2879 	.q_rx_stop = &qed_stop_rxq,
2880 	.q_tx_start = &qed_start_txq,
2881 	.q_tx_stop = &qed_stop_txq,
2882 	.filter_config = &qed_configure_filter,
2883 	.fastpath_stop = &qed_fastpath_stop,
2884 	.eth_cqe_completion = &qed_fp_cqe_completion,
2885 	.get_vport_stats = &qed_get_vport_stats,
2886 	.tunn_config = &qed_tunn_configure,
2887 	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
2888 	.configure_arfs_searcher = &qed_configure_arfs_searcher,
2889 	.get_coalesce = &qed_get_coalesce,
2890 };
2891 
2892 const struct qed_eth_ops *qed_get_eth_ops(void)
2893 {
2894 	return &qed_eth_ops_pass;
2895 }
2896 EXPORT_SYMBOL(qed_get_eth_ops);
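
/* Minimal consumer sketch (qede is the in-tree user; cdev, my_cb_ops
 * and my_cookie below are placeholders, and error handling is omitted):
 *
 *	const struct qed_eth_ops *ops = qed_get_eth_ops();
 *	struct qed_dev_eth_info info;
 *
 *	ops->fill_dev_info(cdev, &info);
 *	ops->register_ops(cdev, &my_cb_ops, my_cookie);
 *	...
 *	qed_put_eth_ops();
 */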
2897 
2898 void qed_put_eth_ops(void)
2899 {
2900 	/* TODO - reference count for module? */
2901 }
2902 EXPORT_SYMBOL(qed_put_eth_ops);
2903