1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <asm/byteorder.h>
9 #include <asm/param.h>
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/etherdevice.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/stddef.h>
19 #include <linux/string.h>
20 #include <linux/workqueue.h>
21 #include <linux/bitops.h>
22 #include <linux/bug.h>
23 #include <linux/vmalloc.h>
24 #include "qed.h"
25 #include <linux/qed/qed_chain.h>
26 #include "qed_cxt.h"
27 #include "qed_dcbx.h"
28 #include "qed_dev_api.h"
29 #include <linux/qed/qed_eth_if.h>
30 #include "qed_hsi.h"
31 #include "qed_hw.h"
32 #include "qed_int.h"
33 #include "qed_l2.h"
34 #include "qed_mcp.h"
35 #include "qed_ptp.h"
36 #include "qed_reg_addr.h"
37 #include "qed_sp.h"
38 #include "qed_sriov.h"
39 
40 
41 #define QED_MAX_SGES_NUM 16
42 #define CRC32_POLY 0x1edc6f41
43 
44 struct qed_l2_info {
45 	u32 queues;
46 	unsigned long **pp_qid_usage;
47 
48 	/* The lock is meant to synchronize access to the qid usage */
49 	struct mutex lock;
50 };
51 
52 int qed_l2_alloc(struct qed_hwfn *p_hwfn)
53 {
54 	struct qed_l2_info *p_l2_info;
55 	unsigned long **pp_qids;
56 	u32 i;
57 
58 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
59 		return 0;
60 
61 	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
62 	if (!p_l2_info)
63 		return -ENOMEM;
64 	p_hwfn->p_l2_info = p_l2_info;
65 
66 	if (IS_PF(p_hwfn->cdev)) {
67 		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
68 	} else {
69 		u8 rx = 0, tx = 0;
70 
71 		qed_vf_get_num_rxqs(p_hwfn, &rx);
72 		qed_vf_get_num_txqs(p_hwfn, &tx);
73 
74 		p_l2_info->queues = max_t(u8, rx, tx);
75 	}
76 
77 	pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
78 			  GFP_KERNEL);
79 	if (!pp_qids)
80 		return -ENOMEM;
81 	p_l2_info->pp_qid_usage = pp_qids;
82 
83 	for (i = 0; i < p_l2_info->queues; i++) {
84 		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
85 		if (!pp_qids[i])
86 			return -ENOMEM;
87 	}
88 
89 	return 0;
90 }
91 
92 void qed_l2_setup(struct qed_hwfn *p_hwfn)
93 {
94 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
95 		return;
96 
97 	mutex_init(&p_hwfn->p_l2_info->lock);
98 }
99 
100 void qed_l2_free(struct qed_hwfn *p_hwfn)
101 {
102 	u32 i;
103 
104 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
105 		return;
106 
107 	if (!p_hwfn->p_l2_info)
108 		return;
109 
110 	if (!p_hwfn->p_l2_info->pp_qid_usage)
111 		goto out_l2_info;
112 
113 	/* Free until hit first uninitialized entry */
114 	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
115 		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
116 			break;
117 		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
118 	}
119 
120 	kfree(p_hwfn->p_l2_info->pp_qid_usage);
121 
122 out_l2_info:
123 	kfree(p_hwfn->p_l2_info);
124 	p_hwfn->p_l2_info = NULL;
125 }
126 
127 static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
128 					struct qed_queue_cid *p_cid)
129 {
130 	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
131 	u16 queue_id = p_cid->rel.queue_id;
132 	bool b_rc = true;
133 	u8 first;
134 
135 	mutex_lock(&p_l2_info->lock);
136 
137 	if (queue_id >= p_l2_info->queues) {
138 		DP_NOTICE(p_hwfn,
139 			  "Requested to increase usage for qzone %04x out of %08x\n",
140 			  queue_id, p_l2_info->queues);
141 		b_rc = false;
142 		goto out;
143 	}
144 
145 	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
146 					MAX_QUEUES_PER_QZONE);
147 	if (first >= MAX_QUEUES_PER_QZONE) {
148 		b_rc = false;
149 		goto out;
150 	}
151 
152 	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
153 	p_cid->qid_usage_idx = first;
154 
155 out:
156 	mutex_unlock(&p_l2_info->lock);
157 	return b_rc;
158 }
159 
160 static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
161 					struct qed_queue_cid *p_cid)
162 {
163 	mutex_lock(&p_hwfn->p_l2_info->lock);
164 
165 	clear_bit(p_cid->qid_usage_idx,
166 		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
167 
168 	mutex_unlock(&p_hwfn->p_l2_info->lock);
169 }
170 
171 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
172 			       struct qed_queue_cid *p_cid)
173 {
174 	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
175 
176 	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
177 		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
178 
179 	/* For PF's VFs we maintain the index inside queue-zone in IOV */
180 	if (p_cid->vfid == QED_QUEUE_CID_SELF)
181 		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
182 
183 	vfree(p_cid);
184 }
185 
186 /* The internal is only meant to be directly called by PFs initializeing CIDs
187  * for their VFs.
188  */
189 static struct qed_queue_cid *
190 _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
191 		      u16 opaque_fid,
192 		      u32 cid,
193 		      struct qed_queue_start_common_params *p_params,
194 		      bool b_is_rx,
195 		      struct qed_queue_cid_vf_params *p_vf_params)
196 {
197 	struct qed_queue_cid *p_cid;
198 	int rc;
199 
200 	p_cid = vzalloc(sizeof(*p_cid));
201 	if (!p_cid)
202 		return NULL;
203 
204 	p_cid->opaque_fid = opaque_fid;
205 	p_cid->cid = cid;
206 	p_cid->p_owner = p_hwfn;
207 
208 	/* Fill in parameters */
209 	p_cid->rel.vport_id = p_params->vport_id;
210 	p_cid->rel.queue_id = p_params->queue_id;
211 	p_cid->rel.stats_id = p_params->stats_id;
212 	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
213 	p_cid->b_is_rx = b_is_rx;
214 	p_cid->sb_idx = p_params->sb_idx;
215 
216 	/* Fill-in bits related to VFs' queues if information was provided */
217 	if (p_vf_params) {
218 		p_cid->vfid = p_vf_params->vfid;
219 		p_cid->vf_qid = p_vf_params->vf_qid;
220 		p_cid->vf_legacy = p_vf_params->vf_legacy;
221 	} else {
222 		p_cid->vfid = QED_QUEUE_CID_SELF;
223 	}
224 
225 	/* Don't try calculating the absolute indices for VFs */
226 	if (IS_VF(p_hwfn->cdev)) {
227 		p_cid->abs = p_cid->rel;
228 		goto out;
229 	}
230 
231 	/* Calculate the engine-absolute indices of the resources.
232 	 * This would guarantee they're valid later on.
233 	 * In some cases [SBs] we already have the right values.
234 	 */
235 	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
236 	if (rc)
237 		goto fail;
238 
239 	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
240 	if (rc)
241 		goto fail;
242 
243 	/* In case of a PF configuring its VF's queues, the stats-id is already
244 	 * absolute [since there's a single index that's suitable per-VF].
245 	 */
246 	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
247 		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
248 				  &p_cid->abs.stats_id);
249 		if (rc)
250 			goto fail;
251 	} else {
252 		p_cid->abs.stats_id = p_cid->rel.stats_id;
253 	}
254 
255 out:
256 	/* VF-images have provided the qid_usage_idx on their own.
257 	 * Otherwise, we need to allocate a unique one.
258 	 */
259 	if (!p_vf_params) {
260 		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
261 			goto fail;
262 	} else {
263 		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
264 	}
265 
266 	DP_VERBOSE(p_hwfn,
267 		   QED_MSG_SP,
268 		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
269 		   p_cid->opaque_fid,
270 		   p_cid->cid,
271 		   p_cid->rel.vport_id,
272 		   p_cid->abs.vport_id,
273 		   p_cid->rel.queue_id,
274 		   p_cid->qid_usage_idx,
275 		   p_cid->abs.queue_id,
276 		   p_cid->rel.stats_id,
277 		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
278 
279 	return p_cid;
280 
281 fail:
282 	vfree(p_cid);
283 	return NULL;
284 }
285 
286 struct qed_queue_cid *
287 qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
288 		     u16 opaque_fid,
289 		     struct qed_queue_start_common_params *p_params,
290 		     bool b_is_rx,
291 		     struct qed_queue_cid_vf_params *p_vf_params)
292 {
293 	struct qed_queue_cid *p_cid;
294 	u8 vfid = QED_CXT_PF_CID;
295 	bool b_legacy_vf = false;
296 	u32 cid = 0;
297 
298 	/* In case of legacy VFs, The CID can be derived from the additional
299 	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
300 	 * use the vf_qid for this purpose as well.
301 	 */
302 	if (p_vf_params) {
303 		vfid = p_vf_params->vfid;
304 
305 		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
306 			b_legacy_vf = true;
307 			cid = p_vf_params->vf_qid;
308 		}
309 	}
310 
311 	/* Get a unique firmware CID for this queue, in case it's a PF.
312 	 * VF's don't need a CID as the queue configuration will be done
313 	 * by PF.
314 	 */
315 	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
316 		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
317 					 &cid, vfid)) {
318 			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
319 			return NULL;
320 		}
321 	}
322 
323 	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
324 				      p_params, b_is_rx, p_vf_params);
325 	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
326 		_qed_cxt_release_cid(p_hwfn, cid, vfid);
327 
328 	return p_cid;
329 }
330 
331 static struct qed_queue_cid *
332 qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
333 			u16 opaque_fid,
334 			bool b_is_rx,
335 			struct qed_queue_start_common_params *p_params)
336 {
337 	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
338 				    NULL);
339 }
340 
341 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
342 			   struct qed_sp_vport_start_params *p_params)
343 {
344 	struct vport_start_ramrod_data *p_ramrod = NULL;
345 	struct eth_vport_tpa_param *tpa_param;
346 	struct qed_spq_entry *p_ent =  NULL;
347 	struct qed_sp_init_data init_data;
348 	u16 min_size, rx_mode = 0;
349 	u8 abs_vport_id = 0;
350 	int rc;
351 
352 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
353 	if (rc)
354 		return rc;
355 
356 	memset(&init_data, 0, sizeof(init_data));
357 	init_data.cid = qed_spq_get_cid(p_hwfn);
358 	init_data.opaque_fid = p_params->opaque_fid;
359 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
360 
361 	rc = qed_sp_init_request(p_hwfn, &p_ent,
362 				 ETH_RAMROD_VPORT_START,
363 				 PROTOCOLID_ETH, &init_data);
364 	if (rc)
365 		return rc;
366 
367 	p_ramrod		= &p_ent->ramrod.vport_start;
368 	p_ramrod->vport_id	= abs_vport_id;
369 
370 	p_ramrod->mtu			= cpu_to_le16(p_params->mtu);
371 	p_ramrod->handle_ptp_pkts	= p_params->handle_ptp_pkts;
372 	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
373 	p_ramrod->drop_ttl0_en		= p_params->drop_ttl0;
374 	p_ramrod->untagged		= p_params->only_untagged;
375 
376 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
377 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
378 
379 	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
380 
381 	/* TPA related fields */
382 	tpa_param = &p_ramrod->tpa_param;
383 	memset(tpa_param, 0, sizeof(*tpa_param));
384 
385 	tpa_param->max_buff_num = p_params->max_buffers_per_cqe;
386 
387 	switch (p_params->tpa_mode) {
388 	case QED_TPA_MODE_GRO:
389 		min_size = p_params->mtu / 2;
390 
391 		tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
392 		tpa_param->tpa_max_size = cpu_to_le16(U16_MAX);
393 		tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size);
394 		tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size);
395 		tpa_param->tpa_ipv4_en_flg = 1;
396 		tpa_param->tpa_ipv6_en_flg = 1;
397 		tpa_param->tpa_pkt_split_flg = 1;
398 		tpa_param->tpa_gro_consistent_flg = 1;
399 	default:
400 		break;
401 	}
402 
403 	p_ramrod->tx_switching_en = p_params->tx_switching;
404 
405 	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
406 	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
407 
408 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
409 	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
410 						  p_params->concrete_fid);
411 
412 	return qed_spq_post(p_hwfn, p_ent, NULL);
413 }
414 
415 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
416 			      struct qed_sp_vport_start_params *p_params)
417 {
418 	if (IS_VF(p_hwfn->cdev)) {
419 		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
420 					     p_params->mtu,
421 					     p_params->remove_inner_vlan,
422 					     p_params->tpa_mode,
423 					     p_params->max_buffers_per_cqe,
424 					     p_params->only_untagged);
425 	}
426 
427 	return qed_sp_eth_vport_start(p_hwfn, p_params);
428 }
429 
430 static int
431 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
432 			struct vport_update_ramrod_data *p_ramrod,
433 			struct qed_rss_params *p_rss)
434 {
435 	struct eth_vport_rss_config *p_config;
436 	u16 capabilities = 0;
437 	int i, table_size;
438 	int rc = 0;
439 
440 	if (!p_rss) {
441 		p_ramrod->common.update_rss_flg = 0;
442 		return rc;
443 	}
444 	p_config = &p_ramrod->rss_config;
445 
446 	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
447 
448 	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
449 	if (rc)
450 		return rc;
451 
452 	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
453 	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
454 	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
455 	p_config->update_rss_key = p_rss->update_rss_key;
456 
457 	p_config->rss_mode = p_rss->rss_enable ?
458 			     ETH_VPORT_RSS_MODE_REGULAR :
459 			     ETH_VPORT_RSS_MODE_DISABLED;
460 
461 	SET_FIELD(capabilities,
462 		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
463 		  !!(p_rss->rss_caps & QED_RSS_IPV4));
464 	SET_FIELD(capabilities,
465 		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
466 		  !!(p_rss->rss_caps & QED_RSS_IPV6));
467 	SET_FIELD(capabilities,
468 		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
469 		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
470 	SET_FIELD(capabilities,
471 		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
472 		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
473 	SET_FIELD(capabilities,
474 		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
475 		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
476 	SET_FIELD(capabilities,
477 		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
478 		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
479 	p_config->tbl_size = p_rss->rss_table_size_log;
480 
481 	p_config->capabilities = cpu_to_le16(capabilities);
482 
483 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
484 		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
485 		   p_ramrod->common.update_rss_flg,
486 		   p_config->rss_mode,
487 		   p_config->update_rss_capabilities,
488 		   p_config->capabilities,
489 		   p_config->update_rss_ind_table, p_config->update_rss_key);
490 
491 	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
492 			   1 << p_config->tbl_size);
493 	for (i = 0; i < table_size; i++) {
494 		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
495 
496 		if (!p_queue)
497 			return -EINVAL;
498 
499 		p_config->indirection_table[i] =
500 		    cpu_to_le16(p_queue->abs.queue_id);
501 	}
502 
503 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
504 		   "Configured RSS indirection table [%d entries]:\n",
505 		   table_size);
506 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
507 		DP_VERBOSE(p_hwfn,
508 			   NETIF_MSG_IFUP,
509 			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
510 			   le16_to_cpu(p_config->indirection_table[i]),
511 			   le16_to_cpu(p_config->indirection_table[i + 1]),
512 			   le16_to_cpu(p_config->indirection_table[i + 2]),
513 			   le16_to_cpu(p_config->indirection_table[i + 3]),
514 			   le16_to_cpu(p_config->indirection_table[i + 4]),
515 			   le16_to_cpu(p_config->indirection_table[i + 5]),
516 			   le16_to_cpu(p_config->indirection_table[i + 6]),
517 			   le16_to_cpu(p_config->indirection_table[i + 7]),
518 			   le16_to_cpu(p_config->indirection_table[i + 8]),
519 			   le16_to_cpu(p_config->indirection_table[i + 9]),
520 			   le16_to_cpu(p_config->indirection_table[i + 10]),
521 			   le16_to_cpu(p_config->indirection_table[i + 11]),
522 			   le16_to_cpu(p_config->indirection_table[i + 12]),
523 			   le16_to_cpu(p_config->indirection_table[i + 13]),
524 			   le16_to_cpu(p_config->indirection_table[i + 14]),
525 			   le16_to_cpu(p_config->indirection_table[i + 15]));
526 	}
527 
528 	for (i = 0; i < 10; i++)
529 		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
530 
531 	return rc;
532 }
533 
534 static void
535 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
536 			  struct vport_update_ramrod_data *p_ramrod,
537 			  struct qed_filter_accept_flags accept_flags)
538 {
539 	p_ramrod->common.update_rx_mode_flg =
540 		accept_flags.update_rx_mode_config;
541 
542 	p_ramrod->common.update_tx_mode_flg =
543 		accept_flags.update_tx_mode_config;
544 
545 	/* Set Rx mode accept flags */
546 	if (p_ramrod->common.update_rx_mode_flg) {
547 		u8 accept_filter = accept_flags.rx_accept_filter;
548 		u16 state = 0;
549 
550 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
551 			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
552 			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
553 
554 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
555 			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
556 
557 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
558 			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
559 			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
560 
561 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
562 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
563 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
564 
565 		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
566 			  !!(accept_filter & QED_ACCEPT_BCAST));
567 
568 		SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
569 			  !!(accept_filter & QED_ACCEPT_ANY_VNI));
570 
571 		p_ramrod->rx_mode.state = cpu_to_le16(state);
572 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
573 			   "p_ramrod->rx_mode.state = 0x%x\n", state);
574 	}
575 
576 	/* Set Tx mode accept flags */
577 	if (p_ramrod->common.update_tx_mode_flg) {
578 		u8 accept_filter = accept_flags.tx_accept_filter;
579 		u16 state = 0;
580 
581 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
582 			  !!(accept_filter & QED_ACCEPT_NONE));
583 
584 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
585 			  !!(accept_filter & QED_ACCEPT_NONE));
586 
587 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
588 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
589 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
590 
591 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
592 			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
593 			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
594 
595 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
596 			  !!(accept_filter & QED_ACCEPT_BCAST));
597 
598 		p_ramrod->tx_mode.state = cpu_to_le16(state);
599 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
600 			   "p_ramrod->tx_mode.state = 0x%x\n", state);
601 	}
602 }
603 
604 static void
605 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
606 			    struct vport_update_ramrod_data *p_ramrod,
607 			    const struct qed_sge_tpa_params *param)
608 {
609 	struct eth_vport_tpa_param *tpa;
610 
611 	if (!param) {
612 		p_ramrod->common.update_tpa_param_flg = 0;
613 		p_ramrod->common.update_tpa_en_flg = 0;
614 		p_ramrod->common.update_tpa_param_flg = 0;
615 		return;
616 	}
617 
618 	p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg;
619 	tpa = &p_ramrod->tpa_param;
620 	tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg;
621 	tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg;
622 	tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg;
623 	tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg;
624 
625 	p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg;
626 	tpa->max_buff_num = param->max_buffers_per_cqe;
627 	tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg;
628 	tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg;
629 	tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg;
630 	tpa->tpa_max_aggs_num = param->tpa_max_aggs_num;
631 	tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size);
632 	tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start);
633 	tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont);
634 }
635 
636 static void
637 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
638 			struct vport_update_ramrod_data *p_ramrod,
639 			struct qed_sp_vport_update_params *p_params)
640 {
641 	int i;
642 
643 	memset(&p_ramrod->approx_mcast.bins, 0,
644 	       sizeof(p_ramrod->approx_mcast.bins));
645 
646 	if (!p_params->update_approx_mcast_flg)
647 		return;
648 
649 	p_ramrod->common.update_approx_mcast_flg = 1;
650 	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
651 		u32 *p_bins = p_params->bins;
652 
653 		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
654 	}
655 }
656 
657 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
658 			struct qed_sp_vport_update_params *p_params,
659 			enum spq_mode comp_mode,
660 			struct qed_spq_comp_cb *p_comp_data)
661 {
662 	struct qed_rss_params *p_rss_params = p_params->rss_params;
663 	struct vport_update_ramrod_data_cmn *p_cmn;
664 	struct qed_sp_init_data init_data;
665 	struct vport_update_ramrod_data *p_ramrod = NULL;
666 	struct qed_spq_entry *p_ent = NULL;
667 	u8 abs_vport_id = 0, val;
668 	int rc = -EINVAL;
669 
670 	if (IS_VF(p_hwfn->cdev)) {
671 		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
672 		return rc;
673 	}
674 
675 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
676 	if (rc)
677 		return rc;
678 
679 	memset(&init_data, 0, sizeof(init_data));
680 	init_data.cid = qed_spq_get_cid(p_hwfn);
681 	init_data.opaque_fid = p_params->opaque_fid;
682 	init_data.comp_mode = comp_mode;
683 	init_data.p_comp_data = p_comp_data;
684 
685 	rc = qed_sp_init_request(p_hwfn, &p_ent,
686 				 ETH_RAMROD_VPORT_UPDATE,
687 				 PROTOCOLID_ETH, &init_data);
688 	if (rc)
689 		return rc;
690 
691 	/* Copy input params to ramrod according to FW struct */
692 	p_ramrod = &p_ent->ramrod.vport_update;
693 	p_cmn = &p_ramrod->common;
694 
695 	p_cmn->vport_id = abs_vport_id;
696 	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
697 	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
698 	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
699 	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
700 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
701 	val = p_params->update_accept_any_vlan_flg;
702 	p_cmn->update_accept_any_vlan_flg = val;
703 
704 	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
705 	val = p_params->update_inner_vlan_removal_flg;
706 	p_cmn->update_inner_vlan_removal_en_flg = val;
707 
708 	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
709 	val = p_params->update_default_vlan_enable_flg;
710 	p_cmn->update_default_vlan_en_flg = val;
711 
712 	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
713 	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
714 
715 	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
716 
717 	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
718 	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
719 
720 	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
721 	val = p_params->update_anti_spoofing_en_flg;
722 	p_ramrod->common.update_anti_spoofing_en_flg = val;
723 
724 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
725 	if (rc) {
726 		qed_sp_destroy_request(p_hwfn, p_ent);
727 		return rc;
728 	}
729 
730 	if (p_params->update_ctl_frame_check) {
731 		p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
732 		p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
733 	}
734 
735 	/* Update mcast bins for VFs, PF doesn't use this functionality */
736 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
737 
738 	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
739 	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
740 	return qed_spq_post(p_hwfn, p_ent, NULL);
741 }
742 
743 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
744 {
745 	struct vport_stop_ramrod_data *p_ramrod;
746 	struct qed_sp_init_data init_data;
747 	struct qed_spq_entry *p_ent;
748 	u8 abs_vport_id = 0;
749 	int rc;
750 
751 	if (IS_VF(p_hwfn->cdev))
752 		return qed_vf_pf_vport_stop(p_hwfn);
753 
754 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
755 	if (rc)
756 		return rc;
757 
758 	memset(&init_data, 0, sizeof(init_data));
759 	init_data.cid = qed_spq_get_cid(p_hwfn);
760 	init_data.opaque_fid = opaque_fid;
761 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
762 
763 	rc = qed_sp_init_request(p_hwfn, &p_ent,
764 				 ETH_RAMROD_VPORT_STOP,
765 				 PROTOCOLID_ETH, &init_data);
766 	if (rc)
767 		return rc;
768 
769 	p_ramrod = &p_ent->ramrod.vport_stop;
770 	p_ramrod->vport_id = abs_vport_id;
771 
772 	return qed_spq_post(p_hwfn, p_ent, NULL);
773 }
774 
775 static int
776 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
777 		       struct qed_filter_accept_flags *p_accept_flags)
778 {
779 	struct qed_sp_vport_update_params s_params;
780 
781 	memset(&s_params, 0, sizeof(s_params));
782 	memcpy(&s_params.accept_flags, p_accept_flags,
783 	       sizeof(struct qed_filter_accept_flags));
784 
785 	return qed_vf_pf_vport_update(p_hwfn, &s_params);
786 }
787 
788 static int qed_filter_accept_cmd(struct qed_dev *cdev,
789 				 u8 vport,
790 				 struct qed_filter_accept_flags accept_flags,
791 				 u8 update_accept_any_vlan,
792 				 u8 accept_any_vlan,
793 				 enum spq_mode comp_mode,
794 				 struct qed_spq_comp_cb *p_comp_data)
795 {
796 	struct qed_sp_vport_update_params vport_update_params;
797 	int i, rc;
798 
799 	/* Prepare and send the vport rx_mode change */
800 	memset(&vport_update_params, 0, sizeof(vport_update_params));
801 	vport_update_params.vport_id = vport;
802 	vport_update_params.accept_flags = accept_flags;
803 	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
804 	vport_update_params.accept_any_vlan = accept_any_vlan;
805 
806 	for_each_hwfn(cdev, i) {
807 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
808 
809 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
810 
811 		if (IS_VF(cdev)) {
812 			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
813 			if (rc)
814 				return rc;
815 			continue;
816 		}
817 
818 		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
819 					 comp_mode, p_comp_data);
820 		if (rc) {
821 			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
822 			return rc;
823 		}
824 
825 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
826 			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
827 			   accept_flags.rx_accept_filter,
828 			   accept_flags.tx_accept_filter);
829 		if (update_accept_any_vlan)
830 			DP_VERBOSE(p_hwfn, QED_MSG_SP,
831 				   "accept_any_vlan=%d configured\n",
832 				   accept_any_vlan);
833 	}
834 
835 	return 0;
836 }
837 
838 int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
839 			     struct qed_queue_cid *p_cid,
840 			     u16 bd_max_bytes,
841 			     dma_addr_t bd_chain_phys_addr,
842 			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
843 {
844 	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
845 	struct qed_spq_entry *p_ent = NULL;
846 	struct qed_sp_init_data init_data;
847 	int rc = -EINVAL;
848 
849 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
850 		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
851 		   p_cid->opaque_fid, p_cid->cid,
852 		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
853 
854 	/* Get SPQ entry */
855 	memset(&init_data, 0, sizeof(init_data));
856 	init_data.cid = p_cid->cid;
857 	init_data.opaque_fid = p_cid->opaque_fid;
858 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
859 
860 	rc = qed_sp_init_request(p_hwfn, &p_ent,
861 				 ETH_RAMROD_RX_QUEUE_START,
862 				 PROTOCOLID_ETH, &init_data);
863 	if (rc)
864 		return rc;
865 
866 	p_ramrod = &p_ent->ramrod.rx_queue_start;
867 
868 	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
869 	p_ramrod->sb_index = p_cid->sb_idx;
870 	p_ramrod->vport_id = p_cid->abs.vport_id;
871 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
872 	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
873 	p_ramrod->complete_cqe_flg = 0;
874 	p_ramrod->complete_event_flg = 1;
875 
876 	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
877 	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
878 
879 	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
880 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
881 
882 	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
883 		bool b_legacy_vf = !!(p_cid->vf_legacy &
884 				      QED_QCID_LEGACY_VF_RX_PROD);
885 
886 		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
887 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
888 			   "Queue%s is meant for VF rxq[%02x]\n",
889 			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
890 		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
891 	}
892 
893 	return qed_spq_post(p_hwfn, p_ent, NULL);
894 }
895 
896 static int
897 qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
898 			  struct qed_queue_cid *p_cid,
899 			  u16 bd_max_bytes,
900 			  dma_addr_t bd_chain_phys_addr,
901 			  dma_addr_t cqe_pbl_addr,
902 			  u16 cqe_pbl_size, void __iomem **pp_prod)
903 {
904 	u32 init_prod_val = 0;
905 
906 	*pp_prod = p_hwfn->regview +
907 		   GTT_BAR0_MAP_REG_MSDM_RAM +
908 		    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
909 
910 	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
911 	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
912 			  (u32 *)(&init_prod_val));
913 
914 	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
915 					bd_max_bytes,
916 					bd_chain_phys_addr,
917 					cqe_pbl_addr, cqe_pbl_size);
918 }
919 
920 static int
921 qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
922 		       u16 opaque_fid,
923 		       struct qed_queue_start_common_params *p_params,
924 		       u16 bd_max_bytes,
925 		       dma_addr_t bd_chain_phys_addr,
926 		       dma_addr_t cqe_pbl_addr,
927 		       u16 cqe_pbl_size,
928 		       struct qed_rxq_start_ret_params *p_ret_params)
929 {
930 	struct qed_queue_cid *p_cid;
931 	int rc;
932 
933 	/* Allocate a CID for the queue */
934 	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
935 	if (!p_cid)
936 		return -ENOMEM;
937 
938 	if (IS_PF(p_hwfn->cdev)) {
939 		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
940 					       bd_max_bytes,
941 					       bd_chain_phys_addr,
942 					       cqe_pbl_addr, cqe_pbl_size,
943 					       &p_ret_params->p_prod);
944 	} else {
945 		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
946 					 bd_max_bytes,
947 					 bd_chain_phys_addr,
948 					 cqe_pbl_addr,
949 					 cqe_pbl_size, &p_ret_params->p_prod);
950 	}
951 
952 	/* Provide the caller with a reference to as handler */
953 	if (rc)
954 		qed_eth_queue_cid_release(p_hwfn, p_cid);
955 	else
956 		p_ret_params->p_handle = (void *)p_cid;
957 
958 	return rc;
959 }
960 
961 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
962 				void **pp_rxq_handles,
963 				u8 num_rxqs,
964 				u8 complete_cqe_flg,
965 				u8 complete_event_flg,
966 				enum spq_mode comp_mode,
967 				struct qed_spq_comp_cb *p_comp_data)
968 {
969 	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
970 	struct qed_spq_entry *p_ent = NULL;
971 	struct qed_sp_init_data init_data;
972 	struct qed_queue_cid *p_cid;
973 	int rc = -EINVAL;
974 	u8 i;
975 
976 	memset(&init_data, 0, sizeof(init_data));
977 	init_data.comp_mode = comp_mode;
978 	init_data.p_comp_data = p_comp_data;
979 
980 	for (i = 0; i < num_rxqs; i++) {
981 		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
982 
983 		/* Get SPQ entry */
984 		init_data.cid = p_cid->cid;
985 		init_data.opaque_fid = p_cid->opaque_fid;
986 
987 		rc = qed_sp_init_request(p_hwfn, &p_ent,
988 					 ETH_RAMROD_RX_QUEUE_UPDATE,
989 					 PROTOCOLID_ETH, &init_data);
990 		if (rc)
991 			return rc;
992 
993 		p_ramrod = &p_ent->ramrod.rx_queue_update;
994 		p_ramrod->vport_id = p_cid->abs.vport_id;
995 
996 		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
997 		p_ramrod->complete_cqe_flg = complete_cqe_flg;
998 		p_ramrod->complete_event_flg = complete_event_flg;
999 
1000 		rc = qed_spq_post(p_hwfn, p_ent, NULL);
1001 		if (rc)
1002 			return rc;
1003 	}
1004 
1005 	return rc;
1006 }
1007 
1008 static int
1009 qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
1010 			 struct qed_queue_cid *p_cid,
1011 			 bool b_eq_completion_only, bool b_cqe_completion)
1012 {
1013 	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
1014 	struct qed_spq_entry *p_ent = NULL;
1015 	struct qed_sp_init_data init_data;
1016 	int rc;
1017 
1018 	memset(&init_data, 0, sizeof(init_data));
1019 	init_data.cid = p_cid->cid;
1020 	init_data.opaque_fid = p_cid->opaque_fid;
1021 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1022 
1023 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1024 				 ETH_RAMROD_RX_QUEUE_STOP,
1025 				 PROTOCOLID_ETH, &init_data);
1026 	if (rc)
1027 		return rc;
1028 
1029 	p_ramrod = &p_ent->ramrod.rx_queue_stop;
1030 	p_ramrod->vport_id = p_cid->abs.vport_id;
1031 	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
1032 
1033 	/* Cleaning the queue requires the completion to arrive there.
1034 	 * In addition, VFs require the answer to come as eqe to PF.
1035 	 */
1036 	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
1037 				      !b_eq_completion_only) ||
1038 				     b_cqe_completion;
1039 	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
1040 				       b_eq_completion_only;
1041 
1042 	return qed_spq_post(p_hwfn, p_ent, NULL);
1043 }
1044 
1045 int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
1046 			  void *p_rxq,
1047 			  bool eq_completion_only, bool cqe_completion)
1048 {
1049 	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
1050 	int rc = -EINVAL;
1051 
1052 	if (IS_PF(p_hwfn->cdev))
1053 		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
1054 					      eq_completion_only,
1055 					      cqe_completion);
1056 	else
1057 		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
1058 
1059 	if (!rc)
1060 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1061 	return rc;
1062 }
1063 
1064 int
1065 qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
1066 			 struct qed_queue_cid *p_cid,
1067 			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
1068 {
1069 	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
1070 	struct qed_spq_entry *p_ent = NULL;
1071 	struct qed_sp_init_data init_data;
1072 	int rc = -EINVAL;
1073 
1074 	/* Get SPQ entry */
1075 	memset(&init_data, 0, sizeof(init_data));
1076 	init_data.cid = p_cid->cid;
1077 	init_data.opaque_fid = p_cid->opaque_fid;
1078 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1079 
1080 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1081 				 ETH_RAMROD_TX_QUEUE_START,
1082 				 PROTOCOLID_ETH, &init_data);
1083 	if (rc)
1084 		return rc;
1085 
1086 	p_ramrod = &p_ent->ramrod.tx_queue_start;
1087 	p_ramrod->vport_id = p_cid->abs.vport_id;
1088 
1089 	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
1090 	p_ramrod->sb_index = p_cid->sb_idx;
1091 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
1092 
1093 	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
1094 	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
1095 
1096 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1097 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
1098 
1099 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1100 
1101 	return qed_spq_post(p_hwfn, p_ent, NULL);
1102 }
1103 
1104 static int
1105 qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
1106 			  struct qed_queue_cid *p_cid,
1107 			  u8 tc,
1108 			  dma_addr_t pbl_addr,
1109 			  u16 pbl_size, void __iomem **pp_doorbell)
1110 {
1111 	int rc;
1112 
1113 
1114 	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
1115 				      pbl_addr, pbl_size,
1116 				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
1117 	if (rc)
1118 		return rc;
1119 
1120 	/* Provide the caller with the necessary return values */
1121 	*pp_doorbell = p_hwfn->doorbells +
1122 		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
1123 
1124 	return 0;
1125 }
1126 
1127 static int
1128 qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
1129 		       u16 opaque_fid,
1130 		       struct qed_queue_start_common_params *p_params,
1131 		       u8 tc,
1132 		       dma_addr_t pbl_addr,
1133 		       u16 pbl_size,
1134 		       struct qed_txq_start_ret_params *p_ret_params)
1135 {
1136 	struct qed_queue_cid *p_cid;
1137 	int rc;
1138 
1139 	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
1140 	if (!p_cid)
1141 		return -EINVAL;
1142 
1143 	if (IS_PF(p_hwfn->cdev))
1144 		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
1145 					       pbl_addr, pbl_size,
1146 					       &p_ret_params->p_doorbell);
1147 	else
1148 		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
1149 					 pbl_addr, pbl_size,
1150 					 &p_ret_params->p_doorbell);
1151 
1152 	if (rc)
1153 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1154 	else
1155 		p_ret_params->p_handle = (void *)p_cid;
1156 
1157 	return rc;
1158 }
1159 
1160 static int
1161 qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
1162 {
1163 	struct qed_spq_entry *p_ent = NULL;
1164 	struct qed_sp_init_data init_data;
1165 	int rc;
1166 
1167 	memset(&init_data, 0, sizeof(init_data));
1168 	init_data.cid = p_cid->cid;
1169 	init_data.opaque_fid = p_cid->opaque_fid;
1170 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1171 
1172 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1173 				 ETH_RAMROD_TX_QUEUE_STOP,
1174 				 PROTOCOLID_ETH, &init_data);
1175 	if (rc)
1176 		return rc;
1177 
1178 	return qed_spq_post(p_hwfn, p_ent, NULL);
1179 }
1180 
1181 int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
1182 {
1183 	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
1184 	int rc;
1185 
1186 	if (IS_PF(p_hwfn->cdev))
1187 		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
1188 	else
1189 		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
1190 
1191 	if (!rc)
1192 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1193 	return rc;
1194 }
1195 
1196 static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
1197 {
1198 	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
1199 
1200 	switch (opcode) {
1201 	case QED_FILTER_ADD:
1202 		action = ETH_FILTER_ACTION_ADD;
1203 		break;
1204 	case QED_FILTER_REMOVE:
1205 		action = ETH_FILTER_ACTION_REMOVE;
1206 		break;
1207 	case QED_FILTER_FLUSH:
1208 		action = ETH_FILTER_ACTION_REMOVE_ALL;
1209 		break;
1210 	default:
1211 		action = MAX_ETH_FILTER_ACTION;
1212 	}
1213 
1214 	return action;
1215 }
1216 
1217 static int
1218 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
1219 			u16 opaque_fid,
1220 			struct qed_filter_ucast *p_filter_cmd,
1221 			struct vport_filter_update_ramrod_data **pp_ramrod,
1222 			struct qed_spq_entry **pp_ent,
1223 			enum spq_mode comp_mode,
1224 			struct qed_spq_comp_cb *p_comp_data)
1225 {
1226 	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1227 	struct vport_filter_update_ramrod_data *p_ramrod;
1228 	struct eth_filter_cmd *p_first_filter;
1229 	struct eth_filter_cmd *p_second_filter;
1230 	struct qed_sp_init_data init_data;
1231 	enum eth_filter_action action;
1232 	int rc;
1233 
1234 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1235 			  &vport_to_remove_from);
1236 	if (rc)
1237 		return rc;
1238 
1239 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1240 			  &vport_to_add_to);
1241 	if (rc)
1242 		return rc;
1243 
1244 	/* Get SPQ entry */
1245 	memset(&init_data, 0, sizeof(init_data));
1246 	init_data.cid = qed_spq_get_cid(p_hwfn);
1247 	init_data.opaque_fid = opaque_fid;
1248 	init_data.comp_mode = comp_mode;
1249 	init_data.p_comp_data = p_comp_data;
1250 
1251 	rc = qed_sp_init_request(p_hwfn, pp_ent,
1252 				 ETH_RAMROD_FILTERS_UPDATE,
1253 				 PROTOCOLID_ETH, &init_data);
1254 	if (rc)
1255 		return rc;
1256 
1257 	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1258 	p_ramrod = *pp_ramrod;
1259 	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1260 	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1261 
1262 	switch (p_filter_cmd->opcode) {
1263 	case QED_FILTER_REPLACE:
1264 	case QED_FILTER_MOVE:
1265 		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
1266 	default:
1267 		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
1268 	}
1269 
1270 	p_first_filter	= &p_ramrod->filter_cmds[0];
1271 	p_second_filter = &p_ramrod->filter_cmds[1];
1272 
1273 	switch (p_filter_cmd->type) {
1274 	case QED_FILTER_MAC:
1275 		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
1276 	case QED_FILTER_VLAN:
1277 		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
1278 	case QED_FILTER_MAC_VLAN:
1279 		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
1280 	case QED_FILTER_INNER_MAC:
1281 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
1282 	case QED_FILTER_INNER_VLAN:
1283 		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
1284 	case QED_FILTER_INNER_PAIR:
1285 		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
1286 	case QED_FILTER_INNER_MAC_VNI_PAIR:
1287 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1288 		break;
1289 	case QED_FILTER_MAC_VNI_PAIR:
1290 		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
1291 	case QED_FILTER_VNI:
1292 		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
1293 	}
1294 
1295 	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1296 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1297 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1298 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1299 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1300 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
1301 		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
1302 				    &p_first_filter->mac_mid,
1303 				    &p_first_filter->mac_lsb,
1304 				    (u8 *)p_filter_cmd->mac);
1305 	}
1306 
1307 	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1308 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1309 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1310 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1311 		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
1312 
1313 	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1314 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1315 	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1316 		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
1317 
1318 	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
1319 		p_second_filter->type = p_first_filter->type;
1320 		p_second_filter->mac_msb = p_first_filter->mac_msb;
1321 		p_second_filter->mac_mid = p_first_filter->mac_mid;
1322 		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1323 		p_second_filter->vlan_id = p_first_filter->vlan_id;
1324 		p_second_filter->vni = p_first_filter->vni;
1325 
1326 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1327 
1328 		p_first_filter->vport_id = vport_to_remove_from;
1329 
1330 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1331 		p_second_filter->vport_id = vport_to_add_to;
1332 	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
1333 		p_first_filter->vport_id = vport_to_add_to;
1334 		memcpy(p_second_filter, p_first_filter,
1335 		       sizeof(*p_second_filter));
1336 		p_first_filter->action	= ETH_FILTER_ACTION_REMOVE_ALL;
1337 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1338 	} else {
1339 		action = qed_filter_action(p_filter_cmd->opcode);
1340 
1341 		if (action == MAX_ETH_FILTER_ACTION) {
1342 			DP_NOTICE(p_hwfn,
1343 				  "%d is not supported yet\n",
1344 				  p_filter_cmd->opcode);
1345 			qed_sp_destroy_request(p_hwfn, *pp_ent);
1346 			return -EINVAL;
1347 		}
1348 
1349 		p_first_filter->action = action;
1350 		p_first_filter->vport_id = (p_filter_cmd->opcode ==
1351 					    QED_FILTER_REMOVE) ?
1352 					   vport_to_remove_from :
1353 					   vport_to_add_to;
1354 	}
1355 
1356 	return 0;
1357 }
1358 
1359 int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
1360 			    u16 opaque_fid,
1361 			    struct qed_filter_ucast *p_filter_cmd,
1362 			    enum spq_mode comp_mode,
1363 			    struct qed_spq_comp_cb *p_comp_data)
1364 {
1365 	struct vport_filter_update_ramrod_data	*p_ramrod	= NULL;
1366 	struct qed_spq_entry			*p_ent		= NULL;
1367 	struct eth_filter_cmd_header		*p_header;
1368 	int					rc;
1369 
1370 	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1371 				     &p_ramrod, &p_ent,
1372 				     comp_mode, p_comp_data);
1373 	if (rc) {
1374 		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1375 		return rc;
1376 	}
1377 	p_header = &p_ramrod->filter_cmd_hdr;
1378 	p_header->assert_on_error = p_filter_cmd->assert_on_error;
1379 
1380 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1381 	if (rc) {
1382 		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
1383 		return rc;
1384 	}
1385 
1386 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1387 		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1388 		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1389 		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1390 		   "REMOVE" :
1391 		   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1392 		    "MOVE" : "REPLACE")),
1393 		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1394 		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1395 		    "VLAN" : "MAC & VLAN"),
1396 		   p_ramrod->filter_cmd_hdr.cmd_cnt,
1397 		   p_filter_cmd->is_rx_filter,
1398 		   p_filter_cmd->is_tx_filter);
1399 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1400 		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1401 		   p_filter_cmd->vport_to_add_to,
1402 		   p_filter_cmd->vport_to_remove_from,
1403 		   p_filter_cmd->mac[0],
1404 		   p_filter_cmd->mac[1],
1405 		   p_filter_cmd->mac[2],
1406 		   p_filter_cmd->mac[3],
1407 		   p_filter_cmd->mac[4],
1408 		   p_filter_cmd->mac[5],
1409 		   p_filter_cmd->vlan);
1410 
1411 	return 0;
1412 }
1413 
1414 /*******************************************************************************
1415  * Description:
1416  *         Calculates crc 32 on a buffer
1417  *         Note: crc32_length MUST be aligned to 8
1418  * Return:
1419  ******************************************************************************/
1420 static u32 qed_calc_crc32c(u8 *crc32_packet,
1421 			   u32 crc32_length, u32 crc32_seed, u8 complement)
1422 {
1423 	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1424 	u8 msb = 0, current_byte = 0;
1425 
1426 	if ((!crc32_packet) ||
1427 	    (crc32_length == 0) ||
1428 	    ((crc32_length % 8) != 0))
1429 		return crc32_result;
1430 	for (byte = 0; byte < crc32_length; byte++) {
1431 		current_byte = crc32_packet[byte];
1432 		for (bit = 0; bit < 8; bit++) {
1433 			msb = (u8)(crc32_result >> 31);
1434 			crc32_result = crc32_result << 1;
1435 			if (msb != (0x1 & (current_byte >> bit))) {
1436 				crc32_result = crc32_result ^ CRC32_POLY;
1437 				crc32_result |= 1; /*crc32_result[0] = 1;*/
1438 			}
1439 		}
1440 	}
1441 	return crc32_result;
1442 }
1443 
1444 static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
1445 {
1446 	u32 packet_buf[2] = { 0 };
1447 
1448 	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1449 	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1450 }
1451 
1452 u8 qed_mcast_bin_from_mac(u8 *mac)
1453 {
1454 	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1455 				mac, ETH_ALEN);
1456 
1457 	return crc & 0xff;
1458 }
1459 
1460 static int
1461 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1462 			u16 opaque_fid,
1463 			struct qed_filter_mcast *p_filter_cmd,
1464 			enum spq_mode comp_mode,
1465 			struct qed_spq_comp_cb *p_comp_data)
1466 {
1467 	struct vport_update_ramrod_data *p_ramrod = NULL;
1468 	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1469 	struct qed_spq_entry *p_ent = NULL;
1470 	struct qed_sp_init_data init_data;
1471 	u8 abs_vport_id = 0;
1472 	int rc, i;
1473 
1474 	if (p_filter_cmd->opcode == QED_FILTER_ADD)
1475 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1476 				  &abs_vport_id);
1477 	else
1478 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1479 				  &abs_vport_id);
1480 	if (rc)
1481 		return rc;
1482 
1483 	/* Get SPQ entry */
1484 	memset(&init_data, 0, sizeof(init_data));
1485 	init_data.cid = qed_spq_get_cid(p_hwfn);
1486 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1487 	init_data.comp_mode = comp_mode;
1488 	init_data.p_comp_data = p_comp_data;
1489 
1490 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1491 				 ETH_RAMROD_VPORT_UPDATE,
1492 				 PROTOCOLID_ETH, &init_data);
1493 	if (rc) {
1494 		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1495 		return rc;
1496 	}
1497 
1498 	p_ramrod = &p_ent->ramrod.vport_update;
1499 	p_ramrod->common.update_approx_mcast_flg = 1;
1500 
1501 	/* explicitly clear out the entire vector */
1502 	memset(&p_ramrod->approx_mcast.bins, 0,
1503 	       sizeof(p_ramrod->approx_mcast.bins));
1504 	memset(bins, 0, sizeof(bins));
1505 	/* filter ADD op is explicit set op and it removes
1506 	 *  any existing filters for the vport
1507 	 */
1508 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1509 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1510 			u32 bit, nbits;
1511 
1512 			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1513 			nbits = sizeof(u32) * BITS_PER_BYTE;
1514 			bins[bit / nbits] |= 1 << (bit % nbits);
1515 		}
1516 
1517 		/* Convert to correct endianity */
1518 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1519 			struct vport_update_ramrod_mcast *p_ramrod_bins;
1520 
1521 			p_ramrod_bins = &p_ramrod->approx_mcast;
1522 			p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
1523 		}
1524 	}
1525 
1526 	p_ramrod->common.vport_id = abs_vport_id;
1527 
1528 	return qed_spq_post(p_hwfn, p_ent, NULL);
1529 }
1530 
1531 static int qed_filter_mcast_cmd(struct qed_dev *cdev,
1532 				struct qed_filter_mcast *p_filter_cmd,
1533 				enum spq_mode comp_mode,
1534 				struct qed_spq_comp_cb *p_comp_data)
1535 {
1536 	int rc = 0;
1537 	int i;
1538 
1539 	/* only ADD and REMOVE operations are supported for multi-cast */
1540 	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1541 	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
1542 	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1543 		return -EINVAL;
1544 
1545 	for_each_hwfn(cdev, i) {
1546 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1547 
1548 		u16 opaque_fid;
1549 
1550 		if (IS_VF(cdev)) {
1551 			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1552 			continue;
1553 		}
1554 
1555 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1556 
1557 		rc = qed_sp_eth_filter_mcast(p_hwfn,
1558 					     opaque_fid,
1559 					     p_filter_cmd,
1560 					     comp_mode, p_comp_data);
1561 	}
1562 	return rc;
1563 }
1564 
1565 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1566 				struct qed_filter_ucast *p_filter_cmd,
1567 				enum spq_mode comp_mode,
1568 				struct qed_spq_comp_cb *p_comp_data)
1569 {
1570 	int rc = 0;
1571 	int i;
1572 
1573 	for_each_hwfn(cdev, i) {
1574 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1575 		u16 opaque_fid;
1576 
1577 		if (IS_VF(cdev)) {
1578 			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1579 			continue;
1580 		}
1581 
1582 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1583 
1584 		rc = qed_sp_eth_filter_ucast(p_hwfn,
1585 					     opaque_fid,
1586 					     p_filter_cmd,
1587 					     comp_mode, p_comp_data);
1588 		if (rc)
1589 			break;
1590 	}
1591 
1592 	return rc;
1593 }
1594 
1595 /* Statistics related code */
1596 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1597 					   u32 *p_addr,
1598 					   u32 *p_len, u16 statistics_bin)
1599 {
1600 	if (IS_PF(p_hwfn->cdev)) {
1601 		*p_addr = BAR0_MAP_REG_PSDM_RAM +
1602 		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1603 		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
1604 	} else {
1605 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1606 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1607 
1608 		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1609 		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
1610 	}
1611 }
1612 
1613 static noinline_for_stack void
1614 __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1615 		       struct qed_eth_stats *p_stats, u16 statistics_bin)
1616 {
1617 	struct eth_pstorm_per_queue_stat pstats;
1618 	u32 pstats_addr = 0, pstats_len = 0;
1619 
1620 	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1621 				       statistics_bin);
1622 
1623 	memset(&pstats, 0, sizeof(pstats));
1624 	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1625 
1626 	p_stats->common.tx_ucast_bytes +=
1627 	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1628 	p_stats->common.tx_mcast_bytes +=
1629 	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1630 	p_stats->common.tx_bcast_bytes +=
1631 	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1632 	p_stats->common.tx_ucast_pkts +=
1633 	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1634 	p_stats->common.tx_mcast_pkts +=
1635 	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1636 	p_stats->common.tx_bcast_pkts +=
1637 	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1638 	p_stats->common.tx_err_drop_pkts +=
1639 	    HILO_64_REGPAIR(pstats.error_drop_pkts);
1640 }
1641 
1642 static noinline_for_stack void
1643 __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1644 		       struct qed_eth_stats *p_stats, u16 statistics_bin)
1645 {
1646 	struct tstorm_per_port_stat tstats;
1647 	u32 tstats_addr, tstats_len;
1648 
1649 	if (IS_PF(p_hwfn->cdev)) {
1650 		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1651 		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1652 		tstats_len = sizeof(struct tstorm_per_port_stat);
1653 	} else {
1654 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1655 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1656 
1657 		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1658 		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1659 	}
1660 
1661 	memset(&tstats, 0, sizeof(tstats));
1662 	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1663 
1664 	p_stats->common.mftag_filter_discards +=
1665 	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
1666 	p_stats->common.mac_filter_discards +=
1667 	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1668 	p_stats->common.gft_filter_drop +=
1669 		HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
1670 }
1671 
1672 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1673 					   u32 *p_addr,
1674 					   u32 *p_len, u16 statistics_bin)
1675 {
1676 	if (IS_PF(p_hwfn->cdev)) {
1677 		*p_addr = BAR0_MAP_REG_USDM_RAM +
1678 		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1679 		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
1680 	} else {
1681 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1682 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1683 
1684 		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1685 		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
1686 	}
1687 }
1688 
1689 static noinline_for_stack
1690 void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1691 			    struct qed_eth_stats *p_stats, u16 statistics_bin)
1692 {
1693 	struct eth_ustorm_per_queue_stat ustats;
1694 	u32 ustats_addr = 0, ustats_len = 0;
1695 
1696 	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1697 				       statistics_bin);
1698 
1699 	memset(&ustats, 0, sizeof(ustats));
1700 	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1701 
1702 	p_stats->common.rx_ucast_bytes +=
1703 	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1704 	p_stats->common.rx_mcast_bytes +=
1705 	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1706 	p_stats->common.rx_bcast_bytes +=
1707 	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1708 	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1709 	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1710 	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1711 }
1712 
1713 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1714 					   u32 *p_addr,
1715 					   u32 *p_len, u16 statistics_bin)
1716 {
1717 	if (IS_PF(p_hwfn->cdev)) {
1718 		*p_addr = BAR0_MAP_REG_MSDM_RAM +
1719 		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1720 		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1721 	} else {
1722 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1723 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1724 
1725 		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1726 		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
1727 	}
1728 }
1729 
1730 static noinline_for_stack void
1731 __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1732 		       struct qed_eth_stats *p_stats, u16 statistics_bin)
1733 {
1734 	struct eth_mstorm_per_queue_stat mstats;
1735 	u32 mstats_addr = 0, mstats_len = 0;
1736 
1737 	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1738 				       statistics_bin);
1739 
1740 	memset(&mstats, 0, sizeof(mstats));
1741 	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1742 
1743 	p_stats->common.no_buff_discards +=
1744 	    HILO_64_REGPAIR(mstats.no_buff_discard);
1745 	p_stats->common.packet_too_big_discard +=
1746 	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
1747 	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1748 	p_stats->common.tpa_coalesced_pkts +=
1749 	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1750 	p_stats->common.tpa_coalesced_events +=
1751 	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1752 	p_stats->common.tpa_aborts_num +=
1753 	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
1754 	p_stats->common.tpa_coalesced_bytes +=
1755 	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1756 }
1757 
1758 static noinline_for_stack void
1759 __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1760 			   struct qed_eth_stats *p_stats)
1761 {
1762 	struct qed_eth_stats_common *p_common = &p_stats->common;
1763 	struct port_stats port_stats;
1764 	int j;
1765 
1766 	memset(&port_stats, 0, sizeof(port_stats));
1767 
1768 	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1769 			p_hwfn->mcp_info->port_addr +
1770 			offsetof(struct public_port, stats),
1771 			sizeof(port_stats));
1772 
1773 	p_common->rx_64_byte_packets += port_stats.eth.r64;
1774 	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1775 	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1776 	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1777 	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1778 	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1779 	p_common->rx_crc_errors += port_stats.eth.rfcs;
1780 	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1781 	p_common->rx_pause_frames += port_stats.eth.rxpf;
1782 	p_common->rx_pfc_frames += port_stats.eth.rxpp;
1783 	p_common->rx_align_errors += port_stats.eth.raln;
1784 	p_common->rx_carrier_errors += port_stats.eth.rfcr;
1785 	p_common->rx_oversize_packets += port_stats.eth.rovr;
1786 	p_common->rx_jabbers += port_stats.eth.rjbr;
1787 	p_common->rx_undersize_packets += port_stats.eth.rund;
1788 	p_common->rx_fragments += port_stats.eth.rfrg;
1789 	p_common->tx_64_byte_packets += port_stats.eth.t64;
1790 	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1791 	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1792 	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1793 	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1794 	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1795 	p_common->tx_pause_frames += port_stats.eth.txpf;
1796 	p_common->tx_pfc_frames += port_stats.eth.txpp;
1797 	p_common->rx_mac_bytes += port_stats.eth.rbyte;
1798 	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1799 	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1800 	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1801 	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1802 	p_common->tx_mac_bytes += port_stats.eth.tbyte;
1803 	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1804 	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1805 	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1806 	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1807 	for (j = 0; j < 8; j++) {
1808 		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1809 		p_common->brb_discards += port_stats.brb.brb_discard[j];
1810 	}
1811 
1812 	if (QED_IS_BB(p_hwfn->cdev)) {
1813 		struct qed_eth_stats_bb *p_bb = &p_stats->bb;
1814 
1815 		p_bb->rx_1519_to_1522_byte_packets +=
1816 		    port_stats.eth.u0.bb0.r1522;
1817 		p_bb->rx_1519_to_2047_byte_packets +=
1818 		    port_stats.eth.u0.bb0.r2047;
1819 		p_bb->rx_2048_to_4095_byte_packets +=
1820 		    port_stats.eth.u0.bb0.r4095;
1821 		p_bb->rx_4096_to_9216_byte_packets +=
1822 		    port_stats.eth.u0.bb0.r9216;
1823 		p_bb->rx_9217_to_16383_byte_packets +=
1824 		    port_stats.eth.u0.bb0.r16383;
1825 		p_bb->tx_1519_to_2047_byte_packets +=
1826 		    port_stats.eth.u1.bb1.t2047;
1827 		p_bb->tx_2048_to_4095_byte_packets +=
1828 		    port_stats.eth.u1.bb1.t4095;
1829 		p_bb->tx_4096_to_9216_byte_packets +=
1830 		    port_stats.eth.u1.bb1.t9216;
1831 		p_bb->tx_9217_to_16383_byte_packets +=
1832 		    port_stats.eth.u1.bb1.t16383;
1833 		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1834 		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1835 	} else {
1836 		struct qed_eth_stats_ah *p_ah = &p_stats->ah;
1837 
1838 		p_ah->rx_1519_to_max_byte_packets +=
1839 		    port_stats.eth.u0.ah0.r1519_to_max;
1840 		p_ah->tx_1519_to_max_byte_packets =
1841 		    port_stats.eth.u1.ah1.t1519_to_max;
1842 	}
1843 
1844 	p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
1845 					     p_hwfn->mcp_info->port_addr +
1846 					     offsetof(struct public_port,
1847 						      link_change_count));
1848 }
1849 
1850 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1851 				  struct qed_ptt *p_ptt,
1852 				  struct qed_eth_stats *stats,
1853 				  u16 statistics_bin, bool b_get_port_stats)
1854 {
1855 	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1856 	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1857 	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1858 	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1859 
1860 	if (b_get_port_stats && p_hwfn->mcp_info)
1861 		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1862 }
1863 
1864 static void _qed_get_vport_stats(struct qed_dev *cdev,
1865 				 struct qed_eth_stats *stats)
1866 {
1867 	u8 fw_vport = 0;
1868 	int i;
1869 
1870 	memset(stats, 0, sizeof(*stats));
1871 
1872 	for_each_hwfn(cdev, i) {
1873 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1874 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1875 						    :  NULL;
1876 		bool b_get_port_stats;
1877 
1878 		if (IS_PF(cdev)) {
1879 			/* The main vport index is relative first */
1880 			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1881 				DP_ERR(p_hwfn, "No vport available!\n");
1882 				goto out;
1883 			}
1884 		}
1885 
1886 		if (IS_PF(cdev) && !p_ptt) {
1887 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1888 			continue;
1889 		}
1890 
1891 		b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn);
1892 		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1893 				      b_get_port_stats);
1894 
1895 out:
1896 		if (IS_PF(cdev) && p_ptt)
1897 			qed_ptt_release(p_hwfn, p_ptt);
1898 	}
1899 }
1900 
1901 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
1902 {
1903 	u32 i;
1904 
1905 	if (!cdev) {
1906 		memset(stats, 0, sizeof(*stats));
1907 		return;
1908 	}
1909 
1910 	_qed_get_vport_stats(cdev, stats);
1911 
1912 	if (!cdev->reset_stats)
1913 		return;
1914 
1915 	/* Reduce the statistics baseline */
1916 	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1917 		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1918 }
1919 
1920 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
1921 void qed_reset_vport_stats(struct qed_dev *cdev)
1922 {
1923 	int i;
1924 
1925 	for_each_hwfn(cdev, i) {
1926 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1927 		struct eth_mstorm_per_queue_stat mstats;
1928 		struct eth_ustorm_per_queue_stat ustats;
1929 		struct eth_pstorm_per_queue_stat pstats;
1930 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1931 						    : NULL;
1932 		u32 addr = 0, len = 0;
1933 
1934 		if (IS_PF(cdev) && !p_ptt) {
1935 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1936 			continue;
1937 		}
1938 
1939 		memset(&mstats, 0, sizeof(mstats));
1940 		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1941 		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1942 
1943 		memset(&ustats, 0, sizeof(ustats));
1944 		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1945 		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1946 
1947 		memset(&pstats, 0, sizeof(pstats));
1948 		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1949 		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1950 
1951 		if (IS_PF(cdev))
1952 			qed_ptt_release(p_hwfn, p_ptt);
1953 	}
1954 
1955 	/* PORT statistics are not necessarily reset, so we need to
1956 	 * read and create a baseline for future statistics.
1957 	 * Link change stat is maintained by MFW, return its value as is.
1958 	 */
1959 	if (!cdev->reset_stats) {
1960 		DP_INFO(cdev, "Reset stats not allocated\n");
1961 	} else {
1962 		_qed_get_vport_stats(cdev, cdev->reset_stats);
1963 		cdev->reset_stats->common.link_change_count = 0;
1964 	}
1965 }
1966 
1967 static enum gft_profile_type
1968 qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
1969 {
1970 	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
1971 		return GFT_PROFILE_TYPE_4_TUPLE;
1972 	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
1973 		return GFT_PROFILE_TYPE_IP_DST_ADDR;
1974 	if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
1975 		return GFT_PROFILE_TYPE_IP_SRC_ADDR;
1976 	return GFT_PROFILE_TYPE_L4_DST_PORT;
1977 }
1978 
1979 void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
1980 			     struct qed_ptt *p_ptt,
1981 			     struct qed_arfs_config_params *p_cfg_params)
1982 {
1983 	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
1984 		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1985 			       p_cfg_params->tcp,
1986 			       p_cfg_params->udp,
1987 			       p_cfg_params->ipv4,
1988 			       p_cfg_params->ipv6,
1989 			       qed_arfs_mode_to_hsi(p_cfg_params->mode));
1990 		DP_VERBOSE(p_hwfn,
1991 			   QED_MSG_SP,
1992 			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
1993 			   p_cfg_params->tcp ? "Enable" : "Disable",
1994 			   p_cfg_params->udp ? "Enable" : "Disable",
1995 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
1996 			   p_cfg_params->ipv6 ? "Enable" : "Disable",
1997 			   (u32)p_cfg_params->mode);
1998 	} else {
1999 		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
2000 		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2001 	}
2002 }
2003 
2004 int
2005 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2006 				struct qed_spq_comp_cb *p_cb,
2007 				struct qed_ntuple_filter_params *p_params)
2008 {
2009 	struct rx_update_gft_filter_data *p_ramrod = NULL;
2010 	struct qed_spq_entry *p_ent = NULL;
2011 	struct qed_sp_init_data init_data;
2012 	u16 abs_rx_q_id = 0;
2013 	u8 abs_vport_id = 0;
2014 	int rc = -EINVAL;
2015 
2016 	/* Get SPQ entry */
2017 	memset(&init_data, 0, sizeof(init_data));
2018 	init_data.cid = qed_spq_get_cid(p_hwfn);
2019 
2020 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2021 
2022 	if (p_cb) {
2023 		init_data.comp_mode = QED_SPQ_MODE_CB;
2024 		init_data.p_comp_data = p_cb;
2025 	} else {
2026 		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2027 	}
2028 
2029 	rc = qed_sp_init_request(p_hwfn, &p_ent,
2030 				 ETH_RAMROD_GFT_UPDATE_FILTER,
2031 				 PROTOCOLID_ETH, &init_data);
2032 	if (rc)
2033 		return rc;
2034 
2035 	p_ramrod = &p_ent->ramrod.rx_update_gft;
2036 
2037 	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
2038 	p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
2039 
2040 	if (p_params->b_is_drop) {
2041 		p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
2042 	} else {
2043 		rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2044 		if (rc)
2045 			goto err;
2046 
2047 		if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2048 			rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
2049 					     &abs_rx_q_id);
2050 			if (rc)
2051 				goto err;
2052 
2053 			p_ramrod->rx_qid_valid = 1;
2054 			p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
2055 		}
2056 
2057 		p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
2058 	}
2059 
2060 	p_ramrod->flow_id_valid = 0;
2061 	p_ramrod->flow_id = 0;
2062 	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
2063 	    : GFT_DELETE_FILTER;
2064 
2065 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
2066 		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2067 		   abs_vport_id, abs_rx_q_id,
2068 		   p_params->b_is_add ? "Adding" : "Removing",
2069 		   (u64)p_params->addr, p_params->length);
2070 
2071 	return qed_spq_post(p_hwfn, p_ent, NULL);
2072 
2073 err:
2074 	qed_sp_destroy_request(p_hwfn, p_ent);
2075 	return rc;
2076 }
2077 
2078 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
2079 			 struct qed_ptt *p_ptt,
2080 			 struct qed_queue_cid *p_cid, u16 *p_rx_coal)
2081 {
2082 	u32 coalesce, address, is_valid;
2083 	struct cau_sb_entry sb_entry;
2084 	u8 timer_res;
2085 	int rc;
2086 
2087 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2088 			       p_cid->sb_igu_id * sizeof(u64),
2089 			       (u64)(uintptr_t)&sb_entry, 2, NULL);
2090 	if (rc) {
2091 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2092 		return rc;
2093 	}
2094 
2095 	timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
2096 			      CAU_SB_ENTRY_TIMER_RES0);
2097 
2098 	address = BAR0_MAP_REG_USDM_RAM +
2099 		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2100 	coalesce = qed_rd(p_hwfn, p_ptt, address);
2101 
2102 	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2103 	if (!is_valid)
2104 		return -EINVAL;
2105 
2106 	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2107 	*p_rx_coal = (u16)(coalesce << timer_res);
2108 
2109 	return 0;
2110 }
2111 
2112 int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
2113 			 struct qed_ptt *p_ptt,
2114 			 struct qed_queue_cid *p_cid, u16 *p_tx_coal)
2115 {
2116 	u32 coalesce, address, is_valid;
2117 	struct cau_sb_entry sb_entry;
2118 	u8 timer_res;
2119 	int rc;
2120 
2121 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2122 			       p_cid->sb_igu_id * sizeof(u64),
2123 			       (u64)(uintptr_t)&sb_entry, 2, NULL);
2124 	if (rc) {
2125 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2126 		return rc;
2127 	}
2128 
2129 	timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
2130 			      CAU_SB_ENTRY_TIMER_RES1);
2131 
2132 	address = BAR0_MAP_REG_XSDM_RAM +
2133 		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2134 	coalesce = qed_rd(p_hwfn, p_ptt, address);
2135 
2136 	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2137 	if (!is_valid)
2138 		return -EINVAL;
2139 
2140 	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2141 	*p_tx_coal = (u16)(coalesce << timer_res);
2142 
2143 	return 0;
2144 }
2145 
2146 int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
2147 {
2148 	struct qed_queue_cid *p_cid = handle;
2149 	struct qed_ptt *p_ptt;
2150 	int rc = 0;
2151 
2152 	if (IS_VF(p_hwfn->cdev)) {
2153 		rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2154 		if (rc)
2155 			DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2156 
2157 		return rc;
2158 	}
2159 
2160 	p_ptt = qed_ptt_acquire(p_hwfn);
2161 	if (!p_ptt)
2162 		return -EAGAIN;
2163 
2164 	if (p_cid->b_is_rx) {
2165 		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2166 		if (rc)
2167 			goto out;
2168 	} else {
2169 		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2170 		if (rc)
2171 			goto out;
2172 	}
2173 
2174 out:
2175 	qed_ptt_release(p_hwfn, p_ptt);
2176 
2177 	return rc;
2178 }
2179 
2180 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2181 				 struct qed_dev_eth_info *info)
2182 {
2183 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2184 	int i;
2185 
2186 	memset(info, 0, sizeof(*info));
2187 
2188 	if (IS_PF(cdev)) {
2189 		int max_vf_vlan_filters = 0;
2190 		int max_vf_mac_filters = 0;
2191 
2192 		info->num_tc = p_hwfn->hw_info.num_hw_tc;
2193 
2194 		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
2195 			u16 num_queues = 0;
2196 
2197 			/* Since the feature controls only queue-zones,
2198 			 * make sure we have the contexts [rx, xdp, tcs] to
2199 			 * match.
2200 			 */
2201 			for_each_hwfn(cdev, i) {
2202 				struct qed_hwfn *hwfn = &cdev->hwfns[i];
2203 				u16 l2_queues = (u16)FEAT_NUM(hwfn,
2204 							      QED_PF_L2_QUE);
2205 				u16 cids;
2206 
2207 				cids = hwfn->pf_params.eth_pf_params.num_cons;
2208 				cids /= (2 + info->num_tc);
2209 				num_queues += min_t(u16, l2_queues, cids);
2210 			}
2211 
2212 			/* queues might theoretically be >256, but interrupts'
2213 			 * upper-limit guarantes that it would fit in a u8.
2214 			 */
2215 			if (cdev->int_params.fp_msix_cnt) {
2216 				u8 irqs = cdev->int_params.fp_msix_cnt;
2217 
2218 				info->num_queues = (u8)min_t(u16,
2219 							     num_queues, irqs);
2220 			}
2221 		} else {
2222 			info->num_queues = cdev->num_hwfns;
2223 		}
2224 
2225 		if (IS_QED_SRIOV(cdev)) {
2226 			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
2227 					      QED_ETH_VF_NUM_VLAN_FILTERS;
2228 			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
2229 					     QED_ETH_VF_NUM_MAC_FILTERS;
2230 		}
2231 		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2232 						  QED_VLAN) -
2233 					 max_vf_vlan_filters;
2234 		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2235 						 QED_MAC) -
2236 					max_vf_mac_filters;
2237 
2238 		ether_addr_copy(info->port_mac,
2239 				cdev->hwfns[0].hw_info.hw_mac_addr);
2240 
2241 		info->xdp_supported = true;
2242 	} else {
2243 		u16 total_cids = 0;
2244 
2245 		info->num_tc = 1;
2246 
2247 		/* Determine queues &  XDP support */
2248 		for_each_hwfn(cdev, i) {
2249 			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2250 			u8 queues, cids;
2251 
2252 			qed_vf_get_num_cids(p_hwfn, &cids);
2253 			qed_vf_get_num_rxqs(p_hwfn, &queues);
2254 			info->num_queues += queues;
2255 			total_cids += cids;
2256 		}
2257 
2258 		/* Enable VF XDP in case PF guarntees sufficient connections */
2259 		if (total_cids >= info->num_queues * 3)
2260 			info->xdp_supported = true;
2261 
2262 		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
2263 					    (u8 *)&info->num_vlan_filters);
2264 		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
2265 					   (u8 *)&info->num_mac_filters);
2266 		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
2267 
2268 		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
2269 	}
2270 
2271 	qed_fill_dev_info(cdev, &info->common);
2272 
2273 	if (IS_VF(cdev))
2274 		eth_zero_addr(info->common.hw_mac);
2275 
2276 	return 0;
2277 }
2278 
2279 static void qed_register_eth_ops(struct qed_dev *cdev,
2280 				 struct qed_eth_cb_ops *ops, void *cookie)
2281 {
2282 	cdev->protocol_ops.eth = ops;
2283 	cdev->ops_cookie = cookie;
2284 
2285 	/* For VF, we start bulletin reading */
2286 	if (IS_VF(cdev))
2287 		qed_vf_start_iov_wq(cdev);
2288 }
2289 
2290 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
2291 {
2292 	if (IS_PF(cdev))
2293 		return true;
2294 
2295 	return qed_vf_check_mac(&cdev->hwfns[0], mac);
2296 }
2297 
2298 static int qed_start_vport(struct qed_dev *cdev,
2299 			   struct qed_start_vport_params *params)
2300 {
2301 	int rc, i;
2302 
2303 	for_each_hwfn(cdev, i) {
2304 		struct qed_sp_vport_start_params start = { 0 };
2305 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2306 
2307 		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
2308 							QED_TPA_MODE_NONE;
2309 		start.remove_inner_vlan = params->remove_inner_vlan;
2310 		start.only_untagged = true;	/* untagged only */
2311 		start.drop_ttl0 = params->drop_ttl0;
2312 		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
2313 		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
2314 		start.handle_ptp_pkts = params->handle_ptp_pkts;
2315 		start.vport_id = params->vport_id;
2316 		start.max_buffers_per_cqe = 16;
2317 		start.mtu = params->mtu;
2318 
2319 		rc = qed_sp_vport_start(p_hwfn, &start);
2320 		if (rc) {
2321 			DP_ERR(cdev, "Failed to start VPORT\n");
2322 			return rc;
2323 		}
2324 
2325 		rc = qed_hw_start_fastpath(p_hwfn);
2326 		if (rc) {
2327 			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
2328 			return rc;
2329 		}
2330 
2331 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2332 			   "Started V-PORT %d with MTU %d\n",
2333 			   start.vport_id, start.mtu);
2334 	}
2335 
2336 	if (params->clear_stats)
2337 		qed_reset_vport_stats(cdev);
2338 
2339 	return 0;
2340 }
2341 
2342 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
2343 {
2344 	int rc, i;
2345 
2346 	for_each_hwfn(cdev, i) {
2347 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2348 
2349 		rc = qed_sp_vport_stop(p_hwfn,
2350 				       p_hwfn->hw_info.opaque_fid, vport_id);
2351 
2352 		if (rc) {
2353 			DP_ERR(cdev, "Failed to stop VPORT\n");
2354 			return rc;
2355 		}
2356 	}
2357 	return 0;
2358 }
2359 
2360 static int qed_update_vport_rss(struct qed_dev *cdev,
2361 				struct qed_update_vport_rss_params *input,
2362 				struct qed_rss_params *rss)
2363 {
2364 	int i, fn;
2365 
2366 	/* Update configuration with what's correct regardless of CMT */
2367 	rss->update_rss_config = 1;
2368 	rss->rss_enable = 1;
2369 	rss->update_rss_capabilities = 1;
2370 	rss->update_rss_ind_table = 1;
2371 	rss->update_rss_key = 1;
2372 	rss->rss_caps = input->rss_caps;
2373 	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
2374 
2375 	/* In regular scenario, we'd simply need to take input handlers.
2376 	 * But in CMT, we'd have to split the handlers according to the
2377 	 * engine they were configured on. We'd then have to understand
2378 	 * whether RSS is really required, since 2-queues on CMT doesn't
2379 	 * require RSS.
2380 	 */
2381 	if (cdev->num_hwfns == 1) {
2382 		memcpy(rss->rss_ind_table,
2383 		       input->rss_ind_table,
2384 		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
2385 		rss->rss_table_size_log = 7;
2386 		return 0;
2387 	}
2388 
2389 	/* Start by copying the non-spcific information to the 2nd copy */
2390 	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
2391 
2392 	/* CMT should be round-robin */
2393 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
2394 		struct qed_queue_cid *cid = input->rss_ind_table[i];
2395 		struct qed_rss_params *t_rss;
2396 
2397 		if (cid->p_owner == QED_LEADING_HWFN(cdev))
2398 			t_rss = &rss[0];
2399 		else
2400 			t_rss = &rss[1];
2401 
2402 		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
2403 	}
2404 
2405 	/* Make sure RSS is actually required */
2406 	for_each_hwfn(cdev, fn) {
2407 		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
2408 			if (rss[fn].rss_ind_table[i] !=
2409 			    rss[fn].rss_ind_table[0])
2410 				break;
2411 		}
2412 		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
2413 			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
2414 				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
2415 			return -EINVAL;
2416 		}
2417 		rss[fn].rss_table_size_log = 6;
2418 	}
2419 
2420 	return 0;
2421 }
2422 
2423 static int qed_update_vport(struct qed_dev *cdev,
2424 			    struct qed_update_vport_params *params)
2425 {
2426 	struct qed_sp_vport_update_params sp_params;
2427 	struct qed_rss_params *rss;
2428 	int rc = 0, i;
2429 
2430 	if (!cdev)
2431 		return -ENODEV;
2432 
2433 	rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns));
2434 	if (!rss)
2435 		return -ENOMEM;
2436 
2437 	memset(&sp_params, 0, sizeof(sp_params));
2438 
2439 	/* Translate protocol params into sp params */
2440 	sp_params.vport_id = params->vport_id;
2441 	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
2442 	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
2443 	sp_params.vport_active_rx_flg = params->vport_active_flg;
2444 	sp_params.vport_active_tx_flg = params->vport_active_flg;
2445 	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
2446 	sp_params.tx_switching_flg = params->tx_switching_flg;
2447 	sp_params.accept_any_vlan = params->accept_any_vlan;
2448 	sp_params.update_accept_any_vlan_flg =
2449 		params->update_accept_any_vlan_flg;
2450 
2451 	/* Prepare the RSS configuration */
2452 	if (params->update_rss_flg)
2453 		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
2454 			params->update_rss_flg = 0;
2455 
2456 	for_each_hwfn(cdev, i) {
2457 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2458 
2459 		if (params->update_rss_flg)
2460 			sp_params.rss_params = &rss[i];
2461 
2462 		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2463 		rc = qed_sp_vport_update(p_hwfn, &sp_params,
2464 					 QED_SPQ_MODE_EBLOCK,
2465 					 NULL);
2466 		if (rc) {
2467 			DP_ERR(cdev, "Failed to update VPORT\n");
2468 			goto out;
2469 		}
2470 
2471 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2472 			   "Updated V-PORT %d: active_flag %d [update %d]\n",
2473 			   params->vport_id, params->vport_active_flg,
2474 			   params->update_vport_active_flg);
2475 	}
2476 
2477 out:
2478 	vfree(rss);
2479 	return rc;
2480 }
2481 
2482 static int qed_start_rxq(struct qed_dev *cdev,
2483 			 u8 rss_num,
2484 			 struct qed_queue_start_common_params *p_params,
2485 			 u16 bd_max_bytes,
2486 			 dma_addr_t bd_chain_phys_addr,
2487 			 dma_addr_t cqe_pbl_addr,
2488 			 u16 cqe_pbl_size,
2489 			 struct qed_rxq_start_ret_params *ret_params)
2490 {
2491 	struct qed_hwfn *p_hwfn;
2492 	int rc, hwfn_index;
2493 
2494 	hwfn_index = rss_num % cdev->num_hwfns;
2495 	p_hwfn = &cdev->hwfns[hwfn_index];
2496 
2497 	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2498 	p_params->stats_id = p_params->vport_id;
2499 
2500 	rc = qed_eth_rx_queue_start(p_hwfn,
2501 				    p_hwfn->hw_info.opaque_fid,
2502 				    p_params,
2503 				    bd_max_bytes,
2504 				    bd_chain_phys_addr,
2505 				    cqe_pbl_addr, cqe_pbl_size, ret_params);
2506 	if (rc) {
2507 		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
2508 		return rc;
2509 	}
2510 
2511 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2512 		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2513 		   p_params->queue_id, rss_num, p_params->vport_id,
2514 		   p_params->p_sb->igu_sb_id);
2515 
2516 	return 0;
2517 }
2518 
2519 static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
2520 {
2521 	int rc, hwfn_index;
2522 	struct qed_hwfn *p_hwfn;
2523 
2524 	hwfn_index = rss_id % cdev->num_hwfns;
2525 	p_hwfn = &cdev->hwfns[hwfn_index];
2526 
2527 	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
2528 	if (rc) {
2529 		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
2530 		return rc;
2531 	}
2532 
2533 	return 0;
2534 }
2535 
2536 static int qed_start_txq(struct qed_dev *cdev,
2537 			 u8 rss_num,
2538 			 struct qed_queue_start_common_params *p_params,
2539 			 dma_addr_t pbl_addr,
2540 			 u16 pbl_size,
2541 			 struct qed_txq_start_ret_params *ret_params)
2542 {
2543 	struct qed_hwfn *p_hwfn;
2544 	int rc, hwfn_index;
2545 
2546 	hwfn_index = rss_num % cdev->num_hwfns;
2547 	p_hwfn = &cdev->hwfns[hwfn_index];
2548 	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2549 	p_params->stats_id = p_params->vport_id;
2550 
2551 	rc = qed_eth_tx_queue_start(p_hwfn,
2552 				    p_hwfn->hw_info.opaque_fid,
2553 				    p_params, p_params->tc,
2554 				    pbl_addr, pbl_size, ret_params);
2555 
2556 	if (rc) {
2557 		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
2558 		return rc;
2559 	}
2560 
2561 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2562 		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2563 		   p_params->queue_id, rss_num, p_params->vport_id,
2564 		   p_params->p_sb->igu_sb_id);
2565 
2566 	return 0;
2567 }
2568 
2569 #define QED_HW_STOP_RETRY_LIMIT (10)
2570 static int qed_fastpath_stop(struct qed_dev *cdev)
2571 {
2572 	int rc;
2573 
2574 	rc = qed_hw_stop_fastpath(cdev);
2575 	if (rc) {
2576 		DP_ERR(cdev, "Failed to stop Fastpath\n");
2577 		return rc;
2578 	}
2579 
2580 	return 0;
2581 }
2582 
2583 static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
2584 {
2585 	struct qed_hwfn *p_hwfn;
2586 	int rc, hwfn_index;
2587 
2588 	hwfn_index = rss_id % cdev->num_hwfns;
2589 	p_hwfn = &cdev->hwfns[hwfn_index];
2590 
2591 	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
2592 	if (rc) {
2593 		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
2594 		return rc;
2595 	}
2596 
2597 	return 0;
2598 }
2599 
2600 static int qed_tunn_configure(struct qed_dev *cdev,
2601 			      struct qed_tunn_params *tunn_params)
2602 {
2603 	struct qed_tunnel_info tunn_info;
2604 	int i, rc;
2605 
2606 	memset(&tunn_info, 0, sizeof(tunn_info));
2607 	if (tunn_params->update_vxlan_port) {
2608 		tunn_info.vxlan_port.b_update_port = true;
2609 		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
2610 	}
2611 
2612 	if (tunn_params->update_geneve_port) {
2613 		tunn_info.geneve_port.b_update_port = true;
2614 		tunn_info.geneve_port.port = tunn_params->geneve_port;
2615 	}
2616 
2617 	for_each_hwfn(cdev, i) {
2618 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
2619 		struct qed_ptt *p_ptt;
2620 		struct qed_tunnel_info *tun;
2621 
2622 		tun = &hwfn->cdev->tunnel;
2623 		if (IS_PF(cdev)) {
2624 			p_ptt = qed_ptt_acquire(hwfn);
2625 			if (!p_ptt)
2626 				return -EAGAIN;
2627 		} else {
2628 			p_ptt = NULL;
2629 		}
2630 
2631 		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
2632 					       QED_SPQ_MODE_EBLOCK, NULL);
2633 		if (rc) {
2634 			if (IS_PF(cdev))
2635 				qed_ptt_release(hwfn, p_ptt);
2636 			return rc;
2637 		}
2638 
2639 		if (IS_PF_SRIOV(hwfn)) {
2640 			u16 vxlan_port, geneve_port;
2641 			int j;
2642 
2643 			vxlan_port = tun->vxlan_port.port;
2644 			geneve_port = tun->geneve_port.port;
2645 
2646 			qed_for_each_vf(hwfn, j) {
2647 				qed_iov_bulletin_set_udp_ports(hwfn, j,
2648 							       vxlan_port,
2649 							       geneve_port);
2650 			}
2651 
2652 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2653 		}
2654 		if (IS_PF(cdev))
2655 			qed_ptt_release(hwfn, p_ptt);
2656 	}
2657 
2658 	return 0;
2659 }
2660 
2661 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2662 					enum qed_filter_rx_mode_type type)
2663 {
2664 	struct qed_filter_accept_flags accept_flags;
2665 
2666 	memset(&accept_flags, 0, sizeof(accept_flags));
2667 
2668 	accept_flags.update_rx_mode_config = 1;
2669 	accept_flags.update_tx_mode_config = 1;
2670 	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2671 					QED_ACCEPT_MCAST_MATCHED |
2672 					QED_ACCEPT_BCAST;
2673 	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2674 					QED_ACCEPT_MCAST_MATCHED |
2675 					QED_ACCEPT_BCAST;
2676 
2677 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2678 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2679 						 QED_ACCEPT_MCAST_UNMATCHED;
2680 		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2681 						 QED_ACCEPT_MCAST_UNMATCHED;
2682 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2683 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2684 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2685 	}
2686 
2687 	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
2688 				     QED_SPQ_MODE_CB, NULL);
2689 }
2690 
2691 static int qed_configure_filter_ucast(struct qed_dev *cdev,
2692 				      struct qed_filter_ucast_params *params)
2693 {
2694 	struct qed_filter_ucast ucast;
2695 
2696 	if (!params->vlan_valid && !params->mac_valid) {
2697 		DP_NOTICE(cdev,
2698 			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
2699 		return -EINVAL;
2700 	}
2701 
2702 	memset(&ucast, 0, sizeof(ucast));
2703 	switch (params->type) {
2704 	case QED_FILTER_XCAST_TYPE_ADD:
2705 		ucast.opcode = QED_FILTER_ADD;
2706 		break;
2707 	case QED_FILTER_XCAST_TYPE_DEL:
2708 		ucast.opcode = QED_FILTER_REMOVE;
2709 		break;
2710 	case QED_FILTER_XCAST_TYPE_REPLACE:
2711 		ucast.opcode = QED_FILTER_REPLACE;
2712 		break;
2713 	default:
2714 		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
2715 			  params->type);
2716 	}
2717 
2718 	if (params->vlan_valid && params->mac_valid) {
2719 		ucast.type = QED_FILTER_MAC_VLAN;
2720 		ether_addr_copy(ucast.mac, params->mac);
2721 		ucast.vlan = params->vlan;
2722 	} else if (params->mac_valid) {
2723 		ucast.type = QED_FILTER_MAC;
2724 		ether_addr_copy(ucast.mac, params->mac);
2725 	} else {
2726 		ucast.type = QED_FILTER_VLAN;
2727 		ucast.vlan = params->vlan;
2728 	}
2729 
2730 	ucast.is_rx_filter = true;
2731 	ucast.is_tx_filter = true;
2732 
2733 	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2734 }
2735 
2736 static int qed_configure_filter_mcast(struct qed_dev *cdev,
2737 				      struct qed_filter_mcast_params *params)
2738 {
2739 	struct qed_filter_mcast mcast;
2740 	int i;
2741 
2742 	memset(&mcast, 0, sizeof(mcast));
2743 	switch (params->type) {
2744 	case QED_FILTER_XCAST_TYPE_ADD:
2745 		mcast.opcode = QED_FILTER_ADD;
2746 		break;
2747 	case QED_FILTER_XCAST_TYPE_DEL:
2748 		mcast.opcode = QED_FILTER_REMOVE;
2749 		break;
2750 	default:
2751 		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
2752 			  params->type);
2753 	}
2754 
2755 	mcast.num_mc_addrs = params->num;
2756 	for (i = 0; i < mcast.num_mc_addrs; i++)
2757 		ether_addr_copy(mcast.mac[i], params->mac[i]);
2758 
2759 	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
2760 }
2761 
2762 static int qed_configure_filter(struct qed_dev *cdev,
2763 				struct qed_filter_params *params)
2764 {
2765 	enum qed_filter_rx_mode_type accept_flags;
2766 
2767 	switch (params->type) {
2768 	case QED_FILTER_TYPE_UCAST:
2769 		return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2770 	case QED_FILTER_TYPE_MCAST:
2771 		return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2772 	case QED_FILTER_TYPE_RX_MODE:
2773 		accept_flags = params->filter.accept_flags;
2774 		return qed_configure_filter_rx_mode(cdev, accept_flags);
2775 	default:
2776 		DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
2777 		return -EINVAL;
2778 	}
2779 }
2780 
2781 static int qed_configure_arfs_searcher(struct qed_dev *cdev,
2782 				       enum qed_filter_config_mode mode)
2783 {
2784 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2785 	struct qed_arfs_config_params arfs_config_params;
2786 
2787 	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
2788 	arfs_config_params.tcp = true;
2789 	arfs_config_params.udp = true;
2790 	arfs_config_params.ipv4 = true;
2791 	arfs_config_params.ipv6 = true;
2792 	arfs_config_params.mode = mode;
2793 	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
2794 				&arfs_config_params);
2795 	return 0;
2796 }
2797 
2798 static void
2799 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
2800 			     void *cookie,
2801 			     union event_ring_data *data, u8 fw_return_code)
2802 {
2803 	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
2804 	void *dev = p_hwfn->cdev->ops_cookie;
2805 
2806 	op->arfs_filter_op(dev, cookie, fw_return_code);
2807 }
2808 
2809 static int
2810 qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
2811 			      void *cookie,
2812 			      struct qed_ntuple_filter_params *params)
2813 {
2814 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2815 	struct qed_spq_comp_cb cb;
2816 	int rc = -EINVAL;
2817 
2818 	cb.function = qed_arfs_sp_response_handler;
2819 	cb.cookie = cookie;
2820 
2821 	if (params->b_is_vf) {
2822 		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
2823 					   false)) {
2824 			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
2825 				params->vf_id);
2826 			return rc;
2827 		}
2828 
2829 		params->vport_id = params->vf_id + 1;
2830 		params->qid = QED_RFS_NTUPLE_QID_RSS;
2831 	}
2832 
2833 	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
2834 	if (rc)
2835 		DP_NOTICE(p_hwfn,
2836 			  "Failed to issue a-RFS filter configuration\n");
2837 	else
2838 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
2839 			   "Successfully issued a-RFS filter configuration\n");
2840 
2841 	return rc;
2842 }
2843 
2844 static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
2845 {
2846 	struct qed_queue_cid *p_cid = handle;
2847 	struct qed_hwfn *p_hwfn;
2848 	int rc;
2849 
2850 	p_hwfn = p_cid->p_owner;
2851 	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
2852 	if (rc)
2853 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
2854 			   "Unable to read queue coalescing\n");
2855 
2856 	return rc;
2857 }
2858 
2859 static int qed_fp_cqe_completion(struct qed_dev *dev,
2860 				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
2861 {
2862 	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2863 				      cqe);
2864 }
2865 
2866 static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
2867 {
2868 	int i, ret;
2869 
2870 	if (IS_PF(cdev))
2871 		return 0;
2872 
2873 	for_each_hwfn(cdev, i) {
2874 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2875 
2876 		ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
2877 		if (ret)
2878 			return ret;
2879 	}
2880 
2881 	return 0;
2882 }
2883 
2884 static const struct qed_eth_ops qed_eth_ops_pass = {
2885 	.common = &qed_common_ops_pass,
2886 #ifdef CONFIG_QED_SRIOV
2887 	.iov = &qed_iov_ops_pass,
2888 #endif
2889 #ifdef CONFIG_DCB
2890 	.dcb = &qed_dcbnl_ops_pass,
2891 #endif
2892 	.ptp = &qed_ptp_ops_pass,
2893 	.fill_dev_info = &qed_fill_eth_dev_info,
2894 	.register_ops = &qed_register_eth_ops,
2895 	.check_mac = &qed_check_mac,
2896 	.vport_start = &qed_start_vport,
2897 	.vport_stop = &qed_stop_vport,
2898 	.vport_update = &qed_update_vport,
2899 	.q_rx_start = &qed_start_rxq,
2900 	.q_rx_stop = &qed_stop_rxq,
2901 	.q_tx_start = &qed_start_txq,
2902 	.q_tx_stop = &qed_stop_txq,
2903 	.filter_config = &qed_configure_filter,
2904 	.fastpath_stop = &qed_fastpath_stop,
2905 	.eth_cqe_completion = &qed_fp_cqe_completion,
2906 	.get_vport_stats = &qed_get_vport_stats,
2907 	.tunn_config = &qed_tunn_configure,
2908 	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
2909 	.configure_arfs_searcher = &qed_configure_arfs_searcher,
2910 	.get_coalesce = &qed_get_coalesce,
2911 	.req_bulletin_update_mac = &qed_req_bulletin_update_mac,
2912 };
2913 
2914 const struct qed_eth_ops *qed_get_eth_ops(void)
2915 {
2916 	return &qed_eth_ops_pass;
2917 }
2918 EXPORT_SYMBOL(qed_get_eth_ops);
2919 
2920 void qed_put_eth_ops(void)
2921 {
2922 	/* TODO - reference count for module? */
2923 }
2924 EXPORT_SYMBOL(qed_put_eth_ops);
2925