1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <asm/byteorder.h>
9 #include <asm/param.h>
10 #include <linux/delay.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/etherdevice.h>
13 #include <linux/interrupt.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/stddef.h>
19 #include <linux/string.h>
20 #include <linux/workqueue.h>
21 #include <linux/bitops.h>
22 #include <linux/bug.h>
23 #include <linux/vmalloc.h>
24 #include "qed.h"
25 #include <linux/qed/qed_chain.h>
26 #include "qed_cxt.h"
27 #include "qed_dcbx.h"
28 #include "qed_dev_api.h"
29 #include <linux/qed/qed_eth_if.h>
30 #include "qed_hsi.h"
31 #include "qed_iro_hsi.h"
32 #include "qed_hw.h"
33 #include "qed_int.h"
34 #include "qed_l2.h"
35 #include "qed_mcp.h"
36 #include "qed_ptp.h"
37 #include "qed_reg_addr.h"
38 #include "qed_sp.h"
39 #include "qed_sriov.h"
40 
41 #define QED_MAX_SGES_NUM 16
42 #define CRC32_POLY 0x1edc6f41
43 
44 struct qed_l2_info {
45 	u32 queues;
46 	unsigned long **pp_qid_usage;
47 
48 	/* The lock is meant to synchronize access to the qid usage */
49 	struct mutex lock;
50 };
51 
52 int qed_l2_alloc(struct qed_hwfn *p_hwfn)
53 {
54 	struct qed_l2_info *p_l2_info;
55 	unsigned long **pp_qids;
56 	u32 i;
57 
58 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
59 		return 0;
60 
61 	p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
62 	if (!p_l2_info)
63 		return -ENOMEM;
64 	p_hwfn->p_l2_info = p_l2_info;
65 
66 	if (IS_PF(p_hwfn->cdev)) {
67 		p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
68 	} else {
69 		u8 rx = 0, tx = 0;
70 
71 		qed_vf_get_num_rxqs(p_hwfn, &rx);
72 		qed_vf_get_num_txqs(p_hwfn, &tx);
73 
74 		p_l2_info->queues = max_t(u8, rx, tx);
75 	}
76 
77 	pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
78 			  GFP_KERNEL);
79 	if (!pp_qids)
80 		return -ENOMEM;
81 	p_l2_info->pp_qid_usage = pp_qids;
82 
83 	for (i = 0; i < p_l2_info->queues; i++) {
84 		pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
85 		if (!pp_qids[i])
86 			return -ENOMEM;
87 	}
88 
89 	return 0;
90 }
91 
92 void qed_l2_setup(struct qed_hwfn *p_hwfn)
93 {
94 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
95 		return;
96 
97 	mutex_init(&p_hwfn->p_l2_info->lock);
98 }
99 
100 void qed_l2_free(struct qed_hwfn *p_hwfn)
101 {
102 	u32 i;
103 
104 	if (!QED_IS_L2_PERSONALITY(p_hwfn))
105 		return;
106 
107 	if (!p_hwfn->p_l2_info)
108 		return;
109 
110 	if (!p_hwfn->p_l2_info->pp_qid_usage)
111 		goto out_l2_info;
112 
113 	/* Free until hit first uninitialized entry */
114 	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
115 		if (!p_hwfn->p_l2_info->pp_qid_usage[i])
116 			break;
117 		kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
118 	}
119 
120 	kfree(p_hwfn->p_l2_info->pp_qid_usage);
121 
122 out_l2_info:
123 	kfree(p_hwfn->p_l2_info);
124 	p_hwfn->p_l2_info = NULL;
125 }
126 
127 static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
128 					struct qed_queue_cid *p_cid)
129 {
130 	struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
131 	u16 queue_id = p_cid->rel.queue_id;
132 	bool b_rc = true;
133 	u8 first;
134 
135 	mutex_lock(&p_l2_info->lock);
136 
137 	if (queue_id >= p_l2_info->queues) {
138 		DP_NOTICE(p_hwfn,
139 			  "Requested to increase usage for qzone %04x out of %08x\n",
140 			  queue_id, p_l2_info->queues);
141 		b_rc = false;
142 		goto out;
143 	}
144 
145 	first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
146 					MAX_QUEUES_PER_QZONE);
147 	if (first >= MAX_QUEUES_PER_QZONE) {
148 		b_rc = false;
149 		goto out;
150 	}
151 
152 	__set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
153 	p_cid->qid_usage_idx = first;
154 
155 out:
156 	mutex_unlock(&p_l2_info->lock);
157 	return b_rc;
158 }
159 
160 static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
161 					struct qed_queue_cid *p_cid)
162 {
163 	mutex_lock(&p_hwfn->p_l2_info->lock);
164 
165 	clear_bit(p_cid->qid_usage_idx,
166 		  p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
167 
168 	mutex_unlock(&p_hwfn->p_l2_info->lock);
169 }
170 
171 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
172 			       struct qed_queue_cid *p_cid)
173 {
174 	bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
175 
176 	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
177 		_qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
178 
179 	/* For PF's VFs we maintain the index inside queue-zone in IOV */
180 	if (p_cid->vfid == QED_QUEUE_CID_SELF)
181 		qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
182 
183 	vfree(p_cid);
184 }
185 
186 /* The internal is only meant to be directly called by PFs initializeing CIDs
187  * for their VFs.
188  */
189 static struct qed_queue_cid *
190 _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
191 		      u16 opaque_fid,
192 		      u32 cid,
193 		      struct qed_queue_start_common_params *p_params,
194 		      bool b_is_rx,
195 		      struct qed_queue_cid_vf_params *p_vf_params)
196 {
197 	struct qed_queue_cid *p_cid;
198 	int rc;
199 
200 	p_cid = vzalloc(sizeof(*p_cid));
201 	if (!p_cid)
202 		return NULL;
203 
204 	p_cid->opaque_fid = opaque_fid;
205 	p_cid->cid = cid;
206 	p_cid->p_owner = p_hwfn;
207 
208 	/* Fill in parameters */
209 	p_cid->rel.vport_id = p_params->vport_id;
210 	p_cid->rel.queue_id = p_params->queue_id;
211 	p_cid->rel.stats_id = p_params->stats_id;
212 	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
213 	p_cid->b_is_rx = b_is_rx;
214 	p_cid->sb_idx = p_params->sb_idx;
215 
216 	/* Fill-in bits related to VFs' queues if information was provided */
217 	if (p_vf_params) {
218 		p_cid->vfid = p_vf_params->vfid;
219 		p_cid->vf_qid = p_vf_params->vf_qid;
220 		p_cid->vf_legacy = p_vf_params->vf_legacy;
221 	} else {
222 		p_cid->vfid = QED_QUEUE_CID_SELF;
223 	}
224 
225 	/* Don't try calculating the absolute indices for VFs */
226 	if (IS_VF(p_hwfn->cdev)) {
227 		p_cid->abs = p_cid->rel;
228 		goto out;
229 	}
230 
231 	/* Calculate the engine-absolute indices of the resources.
232 	 * This would guarantee they're valid later on.
233 	 * In some cases [SBs] we already have the right values.
234 	 */
235 	rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
236 	if (rc)
237 		goto fail;
238 
239 	rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
240 	if (rc)
241 		goto fail;
242 
243 	/* In case of a PF configuring its VF's queues, the stats-id is already
244 	 * absolute [since there's a single index that's suitable per-VF].
245 	 */
246 	if (p_cid->vfid == QED_QUEUE_CID_SELF) {
247 		rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
248 				  &p_cid->abs.stats_id);
249 		if (rc)
250 			goto fail;
251 	} else {
252 		p_cid->abs.stats_id = p_cid->rel.stats_id;
253 	}
254 
255 out:
256 	/* VF-images have provided the qid_usage_idx on their own.
257 	 * Otherwise, we need to allocate a unique one.
258 	 */
259 	if (!p_vf_params) {
260 		if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
261 			goto fail;
262 	} else {
263 		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
264 	}
265 
266 	DP_VERBOSE(p_hwfn,
267 		   QED_MSG_SP,
268 		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
269 		   p_cid->opaque_fid,
270 		   p_cid->cid,
271 		   p_cid->rel.vport_id,
272 		   p_cid->abs.vport_id,
273 		   p_cid->rel.queue_id,
274 		   p_cid->qid_usage_idx,
275 		   p_cid->abs.queue_id,
276 		   p_cid->rel.stats_id,
277 		   p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
278 
279 	return p_cid;
280 
281 fail:
282 	vfree(p_cid);
283 	return NULL;
284 }
285 
286 struct qed_queue_cid *
287 qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
288 		     u16 opaque_fid,
289 		     struct qed_queue_start_common_params *p_params,
290 		     bool b_is_rx,
291 		     struct qed_queue_cid_vf_params *p_vf_params)
292 {
293 	struct qed_queue_cid *p_cid;
294 	u8 vfid = QED_CXT_PF_CID;
295 	bool b_legacy_vf = false;
296 	u32 cid = 0;
297 
298 	/* In case of legacy VFs, The CID can be derived from the additional
299 	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
300 	 * use the vf_qid for this purpose as well.
301 	 */
302 	if (p_vf_params) {
303 		vfid = p_vf_params->vfid;
304 
305 		if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
306 			b_legacy_vf = true;
307 			cid = p_vf_params->vf_qid;
308 		}
309 	}
310 
311 	/* Get a unique firmware CID for this queue, in case it's a PF.
312 	 * VF's don't need a CID as the queue configuration will be done
313 	 * by PF.
314 	 */
315 	if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
316 		if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
317 					 &cid, vfid)) {
318 			DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
319 			return NULL;
320 		}
321 	}
322 
323 	p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
324 				      p_params, b_is_rx, p_vf_params);
325 	if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
326 		_qed_cxt_release_cid(p_hwfn, cid, vfid);
327 
328 	return p_cid;
329 }
330 
331 static struct qed_queue_cid *
332 qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
333 			u16 opaque_fid,
334 			bool b_is_rx,
335 			struct qed_queue_start_common_params *p_params)
336 {
337 	return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
338 				    NULL);
339 }
340 
341 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
342 			   struct qed_sp_vport_start_params *p_params)
343 {
344 	struct vport_start_ramrod_data *p_ramrod = NULL;
345 	struct eth_vport_tpa_param *tpa_param;
346 	struct qed_spq_entry *p_ent =  NULL;
347 	struct qed_sp_init_data init_data;
348 	u16 min_size, rx_mode = 0;
349 	u8 abs_vport_id = 0;
350 	int rc;
351 
352 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
353 	if (rc)
354 		return rc;
355 
356 	memset(&init_data, 0, sizeof(init_data));
357 	init_data.cid = qed_spq_get_cid(p_hwfn);
358 	init_data.opaque_fid = p_params->opaque_fid;
359 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
360 
361 	rc = qed_sp_init_request(p_hwfn, &p_ent,
362 				 ETH_RAMROD_VPORT_START,
363 				 PROTOCOLID_ETH, &init_data);
364 	if (rc)
365 		return rc;
366 
367 	p_ramrod		= &p_ent->ramrod.vport_start;
368 	p_ramrod->vport_id	= abs_vport_id;
369 
370 	p_ramrod->mtu			= cpu_to_le16(p_params->mtu);
371 	p_ramrod->handle_ptp_pkts	= p_params->handle_ptp_pkts;
372 	p_ramrod->inner_vlan_removal_en	= p_params->remove_inner_vlan;
373 	p_ramrod->drop_ttl0_en		= p_params->drop_ttl0;
374 	p_ramrod->untagged		= p_params->only_untagged;
375 
376 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
377 	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
378 
379 	p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
380 
381 	/* TPA related fields */
382 	tpa_param = &p_ramrod->tpa_param;
383 	memset(tpa_param, 0, sizeof(*tpa_param));
384 
385 	tpa_param->max_buff_num = p_params->max_buffers_per_cqe;
386 
387 	switch (p_params->tpa_mode) {
388 	case QED_TPA_MODE_GRO:
389 		min_size = p_params->mtu / 2;
390 
391 		tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
392 		tpa_param->tpa_max_size = cpu_to_le16(U16_MAX);
393 		tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size);
394 		tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size);
395 		tpa_param->tpa_ipv4_en_flg = 1;
396 		tpa_param->tpa_ipv6_en_flg = 1;
397 		tpa_param->tpa_pkt_split_flg = 1;
398 		tpa_param->tpa_gro_consistent_flg = 1;
399 		break;
400 	default:
401 		break;
402 	}
403 
404 	p_ramrod->tx_switching_en = p_params->tx_switching;
405 
406 	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
407 	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
408 
409 	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
410 	p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
411 						  p_params->concrete_fid);
412 
413 	return qed_spq_post(p_hwfn, p_ent, NULL);
414 }
415 
416 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
417 			      struct qed_sp_vport_start_params *p_params)
418 {
419 	if (IS_VF(p_hwfn->cdev)) {
420 		return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
421 					     p_params->mtu,
422 					     p_params->remove_inner_vlan,
423 					     p_params->tpa_mode,
424 					     p_params->max_buffers_per_cqe,
425 					     p_params->only_untagged);
426 	}
427 
428 	return qed_sp_eth_vport_start(p_hwfn, p_params);
429 }
430 
431 static int
432 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
433 			struct vport_update_ramrod_data *p_ramrod,
434 			struct qed_rss_params *p_rss)
435 {
436 	struct eth_vport_rss_config *p_config;
437 	u16 capabilities = 0;
438 	int i, table_size;
439 	int rc = 0;
440 
441 	if (!p_rss) {
442 		p_ramrod->common.update_rss_flg = 0;
443 		return rc;
444 	}
445 	p_config = &p_ramrod->rss_config;
446 
447 	BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
448 
449 	rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
450 	if (rc)
451 		return rc;
452 
453 	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
454 	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
455 	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
456 	p_config->update_rss_key = p_rss->update_rss_key;
457 
458 	p_config->rss_mode = p_rss->rss_enable ?
459 			     ETH_VPORT_RSS_MODE_REGULAR :
460 			     ETH_VPORT_RSS_MODE_DISABLED;
461 
462 	SET_FIELD(capabilities,
463 		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
464 		  !!(p_rss->rss_caps & QED_RSS_IPV4));
465 	SET_FIELD(capabilities,
466 		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
467 		  !!(p_rss->rss_caps & QED_RSS_IPV6));
468 	SET_FIELD(capabilities,
469 		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
470 		  !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
471 	SET_FIELD(capabilities,
472 		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
473 		  !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
474 	SET_FIELD(capabilities,
475 		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
476 		  !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
477 	SET_FIELD(capabilities,
478 		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
479 		  !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
480 	p_config->tbl_size = p_rss->rss_table_size_log;
481 
482 	p_config->capabilities = cpu_to_le16(capabilities);
483 
484 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
485 		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
486 		   p_ramrod->common.update_rss_flg,
487 		   p_config->rss_mode,
488 		   p_config->update_rss_capabilities,
489 		   p_config->capabilities,
490 		   p_config->update_rss_ind_table, p_config->update_rss_key);
491 
492 	table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
493 			   1 << p_config->tbl_size);
494 	for (i = 0; i < table_size; i++) {
495 		struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
496 
497 		if (!p_queue)
498 			return -EINVAL;
499 
500 		p_config->indirection_table[i] =
501 		    cpu_to_le16(p_queue->abs.queue_id);
502 	}
503 
504 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
505 		   "Configured RSS indirection table [%d entries]:\n",
506 		   table_size);
507 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
508 		DP_VERBOSE(p_hwfn,
509 			   NETIF_MSG_IFUP,
510 			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
511 			   le16_to_cpu(p_config->indirection_table[i]),
512 			   le16_to_cpu(p_config->indirection_table[i + 1]),
513 			   le16_to_cpu(p_config->indirection_table[i + 2]),
514 			   le16_to_cpu(p_config->indirection_table[i + 3]),
515 			   le16_to_cpu(p_config->indirection_table[i + 4]),
516 			   le16_to_cpu(p_config->indirection_table[i + 5]),
517 			   le16_to_cpu(p_config->indirection_table[i + 6]),
518 			   le16_to_cpu(p_config->indirection_table[i + 7]),
519 			   le16_to_cpu(p_config->indirection_table[i + 8]),
520 			   le16_to_cpu(p_config->indirection_table[i + 9]),
521 			   le16_to_cpu(p_config->indirection_table[i + 10]),
522 			   le16_to_cpu(p_config->indirection_table[i + 11]),
523 			   le16_to_cpu(p_config->indirection_table[i + 12]),
524 			   le16_to_cpu(p_config->indirection_table[i + 13]),
525 			   le16_to_cpu(p_config->indirection_table[i + 14]),
526 			   le16_to_cpu(p_config->indirection_table[i + 15]));
527 	}
528 
529 	for (i = 0; i < 10; i++)
530 		p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
531 
532 	return rc;
533 }
534 
535 static void
536 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
537 			  struct vport_update_ramrod_data *p_ramrod,
538 			  struct qed_filter_accept_flags accept_flags)
539 {
540 	p_ramrod->common.update_rx_mode_flg =
541 		accept_flags.update_rx_mode_config;
542 
543 	p_ramrod->common.update_tx_mode_flg =
544 		accept_flags.update_tx_mode_config;
545 
546 	/* Set Rx mode accept flags */
547 	if (p_ramrod->common.update_rx_mode_flg) {
548 		u8 accept_filter = accept_flags.rx_accept_filter;
549 		u16 state = 0;
550 
551 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
552 			  !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
553 			    !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
554 
555 		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
556 			  !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
557 
558 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
559 			  !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
560 			    !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
561 
562 		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
563 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
564 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
565 
566 		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
567 			  !!(accept_filter & QED_ACCEPT_BCAST));
568 
569 		SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
570 			  !!(accept_filter & QED_ACCEPT_ANY_VNI));
571 
572 		p_ramrod->rx_mode.state = cpu_to_le16(state);
573 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
574 			   "p_ramrod->rx_mode.state = 0x%x\n", state);
575 	}
576 
577 	/* Set Tx mode accept flags */
578 	if (p_ramrod->common.update_tx_mode_flg) {
579 		u8 accept_filter = accept_flags.tx_accept_filter;
580 		u16 state = 0;
581 
582 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
583 			  !!(accept_filter & QED_ACCEPT_NONE));
584 
585 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
586 			  !!(accept_filter & QED_ACCEPT_NONE));
587 
588 		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
589 			  (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
590 			   !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
591 
592 		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
593 			  (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
594 			   !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
595 
596 		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
597 			  !!(accept_filter & QED_ACCEPT_BCAST));
598 
599 		p_ramrod->tx_mode.state = cpu_to_le16(state);
600 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
601 			   "p_ramrod->tx_mode.state = 0x%x\n", state);
602 	}
603 }
604 
605 static void
606 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
607 			    struct vport_update_ramrod_data *p_ramrod,
608 			    const struct qed_sge_tpa_params *param)
609 {
610 	struct eth_vport_tpa_param *tpa;
611 
612 	if (!param) {
613 		p_ramrod->common.update_tpa_param_flg = 0;
614 		p_ramrod->common.update_tpa_en_flg = 0;
615 		p_ramrod->common.update_tpa_param_flg = 0;
616 		return;
617 	}
618 
619 	p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg;
620 	tpa = &p_ramrod->tpa_param;
621 	tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg;
622 	tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg;
623 	tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg;
624 	tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg;
625 
626 	p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg;
627 	tpa->max_buff_num = param->max_buffers_per_cqe;
628 	tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg;
629 	tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg;
630 	tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg;
631 	tpa->tpa_max_aggs_num = param->tpa_max_aggs_num;
632 	tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size);
633 	tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start);
634 	tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont);
635 }
636 
637 static void
638 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
639 			struct vport_update_ramrod_data *p_ramrod,
640 			struct qed_sp_vport_update_params *p_params)
641 {
642 	int i;
643 
644 	memset(&p_ramrod->approx_mcast.bins, 0,
645 	       sizeof(p_ramrod->approx_mcast.bins));
646 
647 	if (!p_params->update_approx_mcast_flg)
648 		return;
649 
650 	p_ramrod->common.update_approx_mcast_flg = 1;
651 	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
652 		u32 *p_bins = p_params->bins;
653 
654 		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
655 	}
656 }
657 
658 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
659 			struct qed_sp_vport_update_params *p_params,
660 			enum spq_mode comp_mode,
661 			struct qed_spq_comp_cb *p_comp_data)
662 {
663 	struct qed_rss_params *p_rss_params = p_params->rss_params;
664 	struct vport_update_ramrod_data_cmn *p_cmn;
665 	struct qed_sp_init_data init_data;
666 	struct vport_update_ramrod_data *p_ramrod = NULL;
667 	struct qed_spq_entry *p_ent = NULL;
668 	u8 abs_vport_id = 0, val;
669 	int rc = -EINVAL;
670 
671 	if (IS_VF(p_hwfn->cdev)) {
672 		rc = qed_vf_pf_vport_update(p_hwfn, p_params);
673 		return rc;
674 	}
675 
676 	rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
677 	if (rc)
678 		return rc;
679 
680 	memset(&init_data, 0, sizeof(init_data));
681 	init_data.cid = qed_spq_get_cid(p_hwfn);
682 	init_data.opaque_fid = p_params->opaque_fid;
683 	init_data.comp_mode = comp_mode;
684 	init_data.p_comp_data = p_comp_data;
685 
686 	rc = qed_sp_init_request(p_hwfn, &p_ent,
687 				 ETH_RAMROD_VPORT_UPDATE,
688 				 PROTOCOLID_ETH, &init_data);
689 	if (rc)
690 		return rc;
691 
692 	/* Copy input params to ramrod according to FW struct */
693 	p_ramrod = &p_ent->ramrod.vport_update;
694 	p_cmn = &p_ramrod->common;
695 
696 	p_cmn->vport_id = abs_vport_id;
697 	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
698 	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
699 	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
700 	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
701 	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
702 	val = p_params->update_accept_any_vlan_flg;
703 	p_cmn->update_accept_any_vlan_flg = val;
704 
705 	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
706 	val = p_params->update_inner_vlan_removal_flg;
707 	p_cmn->update_inner_vlan_removal_en_flg = val;
708 
709 	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
710 	val = p_params->update_default_vlan_enable_flg;
711 	p_cmn->update_default_vlan_en_flg = val;
712 
713 	p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
714 	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
715 
716 	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
717 
718 	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
719 	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
720 
721 	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
722 	val = p_params->update_anti_spoofing_en_flg;
723 	p_ramrod->common.update_anti_spoofing_en_flg = val;
724 
725 	rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
726 	if (rc) {
727 		qed_sp_destroy_request(p_hwfn, p_ent);
728 		return rc;
729 	}
730 
731 	if (p_params->update_ctl_frame_check) {
732 		p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
733 		p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
734 	}
735 
736 	/* Update mcast bins for VFs, PF doesn't use this functionality */
737 	qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
738 
739 	qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
740 	qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
741 	return qed_spq_post(p_hwfn, p_ent, NULL);
742 }
743 
744 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
745 {
746 	struct vport_stop_ramrod_data *p_ramrod;
747 	struct qed_sp_init_data init_data;
748 	struct qed_spq_entry *p_ent;
749 	u8 abs_vport_id = 0;
750 	int rc;
751 
752 	if (IS_VF(p_hwfn->cdev))
753 		return qed_vf_pf_vport_stop(p_hwfn);
754 
755 	rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
756 	if (rc)
757 		return rc;
758 
759 	memset(&init_data, 0, sizeof(init_data));
760 	init_data.cid = qed_spq_get_cid(p_hwfn);
761 	init_data.opaque_fid = opaque_fid;
762 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
763 
764 	rc = qed_sp_init_request(p_hwfn, &p_ent,
765 				 ETH_RAMROD_VPORT_STOP,
766 				 PROTOCOLID_ETH, &init_data);
767 	if (rc)
768 		return rc;
769 
770 	p_ramrod = &p_ent->ramrod.vport_stop;
771 	p_ramrod->vport_id = abs_vport_id;
772 
773 	return qed_spq_post(p_hwfn, p_ent, NULL);
774 }
775 
776 static int
777 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
778 		       struct qed_filter_accept_flags *p_accept_flags)
779 {
780 	struct qed_sp_vport_update_params s_params;
781 
782 	memset(&s_params, 0, sizeof(s_params));
783 	memcpy(&s_params.accept_flags, p_accept_flags,
784 	       sizeof(struct qed_filter_accept_flags));
785 
786 	return qed_vf_pf_vport_update(p_hwfn, &s_params);
787 }
788 
789 static int qed_filter_accept_cmd(struct qed_dev *cdev,
790 				 u8 vport,
791 				 struct qed_filter_accept_flags accept_flags,
792 				 u8 update_accept_any_vlan,
793 				 u8 accept_any_vlan,
794 				 enum spq_mode comp_mode,
795 				 struct qed_spq_comp_cb *p_comp_data)
796 {
797 	struct qed_sp_vport_update_params vport_update_params;
798 	int i, rc;
799 
800 	/* Prepare and send the vport rx_mode change */
801 	memset(&vport_update_params, 0, sizeof(vport_update_params));
802 	vport_update_params.vport_id = vport;
803 	vport_update_params.accept_flags = accept_flags;
804 	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
805 	vport_update_params.accept_any_vlan = accept_any_vlan;
806 
807 	for_each_hwfn(cdev, i) {
808 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
809 
810 		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
811 
812 		if (IS_VF(cdev)) {
813 			rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
814 			if (rc)
815 				return rc;
816 			continue;
817 		}
818 
819 		rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
820 					 comp_mode, p_comp_data);
821 		if (rc) {
822 			DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
823 			return rc;
824 		}
825 
826 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
827 			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
828 			   accept_flags.rx_accept_filter,
829 			   accept_flags.tx_accept_filter);
830 		if (update_accept_any_vlan)
831 			DP_VERBOSE(p_hwfn, QED_MSG_SP,
832 				   "accept_any_vlan=%d configured\n",
833 				   accept_any_vlan);
834 	}
835 
836 	return 0;
837 }
838 
839 int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
840 			     struct qed_queue_cid *p_cid,
841 			     u16 bd_max_bytes,
842 			     dma_addr_t bd_chain_phys_addr,
843 			     dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
844 {
845 	struct rx_queue_start_ramrod_data *p_ramrod = NULL;
846 	struct qed_spq_entry *p_ent = NULL;
847 	struct qed_sp_init_data init_data;
848 	int rc = -EINVAL;
849 
850 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
851 		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
852 		   p_cid->opaque_fid, p_cid->cid,
853 		   p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
854 
855 	/* Get SPQ entry */
856 	memset(&init_data, 0, sizeof(init_data));
857 	init_data.cid = p_cid->cid;
858 	init_data.opaque_fid = p_cid->opaque_fid;
859 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
860 
861 	rc = qed_sp_init_request(p_hwfn, &p_ent,
862 				 ETH_RAMROD_RX_QUEUE_START,
863 				 PROTOCOLID_ETH, &init_data);
864 	if (rc)
865 		return rc;
866 
867 	p_ramrod = &p_ent->ramrod.rx_queue_start;
868 
869 	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
870 	p_ramrod->sb_index = p_cid->sb_idx;
871 	p_ramrod->vport_id = p_cid->abs.vport_id;
872 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
873 	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
874 	p_ramrod->complete_cqe_flg = 0;
875 	p_ramrod->complete_event_flg = 1;
876 
877 	p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
878 	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
879 
880 	p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
881 	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
882 
883 	if (p_cid->vfid != QED_QUEUE_CID_SELF) {
884 		bool b_legacy_vf = !!(p_cid->vf_legacy &
885 				      QED_QCID_LEGACY_VF_RX_PROD);
886 
887 		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
888 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
889 			   "Queue%s is meant for VF rxq[%02x]\n",
890 			   b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
891 		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
892 	}
893 
894 	return qed_spq_post(p_hwfn, p_ent, NULL);
895 }
896 
897 static int
898 qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
899 			  struct qed_queue_cid *p_cid,
900 			  u16 bd_max_bytes,
901 			  dma_addr_t bd_chain_phys_addr,
902 			  dma_addr_t cqe_pbl_addr,
903 			  u16 cqe_pbl_size, void __iomem **pp_prod)
904 {
905 	u32 init_prod_val = 0;
906 
907 	*pp_prod = (u8 __iomem *)
908 	    p_hwfn->regview +
909 	    GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
910 			     MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id);
911 
912 	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
913 	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
914 			  (u32 *)(&init_prod_val));
915 
916 	return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
917 					bd_max_bytes,
918 					bd_chain_phys_addr,
919 					cqe_pbl_addr, cqe_pbl_size);
920 }
921 
922 static int
923 qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
924 		       u16 opaque_fid,
925 		       struct qed_queue_start_common_params *p_params,
926 		       u16 bd_max_bytes,
927 		       dma_addr_t bd_chain_phys_addr,
928 		       dma_addr_t cqe_pbl_addr,
929 		       u16 cqe_pbl_size,
930 		       struct qed_rxq_start_ret_params *p_ret_params)
931 {
932 	struct qed_queue_cid *p_cid;
933 	int rc;
934 
935 	/* Allocate a CID for the queue */
936 	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
937 	if (!p_cid)
938 		return -ENOMEM;
939 
940 	if (IS_PF(p_hwfn->cdev)) {
941 		rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
942 					       bd_max_bytes,
943 					       bd_chain_phys_addr,
944 					       cqe_pbl_addr, cqe_pbl_size,
945 					       &p_ret_params->p_prod);
946 	} else {
947 		rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
948 					 bd_max_bytes,
949 					 bd_chain_phys_addr,
950 					 cqe_pbl_addr,
951 					 cqe_pbl_size, &p_ret_params->p_prod);
952 	}
953 
954 	/* Provide the caller with a reference to as handler */
955 	if (rc)
956 		qed_eth_queue_cid_release(p_hwfn, p_cid);
957 	else
958 		p_ret_params->p_handle = (void *)p_cid;
959 
960 	return rc;
961 }
962 
963 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
964 				void **pp_rxq_handles,
965 				u8 num_rxqs,
966 				u8 complete_cqe_flg,
967 				u8 complete_event_flg,
968 				enum spq_mode comp_mode,
969 				struct qed_spq_comp_cb *p_comp_data)
970 {
971 	struct rx_queue_update_ramrod_data *p_ramrod = NULL;
972 	struct qed_spq_entry *p_ent = NULL;
973 	struct qed_sp_init_data init_data;
974 	struct qed_queue_cid *p_cid;
975 	int rc = -EINVAL;
976 	u8 i;
977 
978 	memset(&init_data, 0, sizeof(init_data));
979 	init_data.comp_mode = comp_mode;
980 	init_data.p_comp_data = p_comp_data;
981 
982 	for (i = 0; i < num_rxqs; i++) {
983 		p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
984 
985 		/* Get SPQ entry */
986 		init_data.cid = p_cid->cid;
987 		init_data.opaque_fid = p_cid->opaque_fid;
988 
989 		rc = qed_sp_init_request(p_hwfn, &p_ent,
990 					 ETH_RAMROD_RX_QUEUE_UPDATE,
991 					 PROTOCOLID_ETH, &init_data);
992 		if (rc)
993 			return rc;
994 
995 		p_ramrod = &p_ent->ramrod.rx_queue_update;
996 		p_ramrod->vport_id = p_cid->abs.vport_id;
997 
998 		p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
999 		p_ramrod->complete_cqe_flg = complete_cqe_flg;
1000 		p_ramrod->complete_event_flg = complete_event_flg;
1001 
1002 		rc = qed_spq_post(p_hwfn, p_ent, NULL);
1003 		if (rc)
1004 			return rc;
1005 	}
1006 
1007 	return rc;
1008 }
1009 
1010 static int
1011 qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
1012 			 struct qed_queue_cid *p_cid,
1013 			 bool b_eq_completion_only, bool b_cqe_completion)
1014 {
1015 	struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
1016 	struct qed_spq_entry *p_ent = NULL;
1017 	struct qed_sp_init_data init_data;
1018 	int rc;
1019 
1020 	memset(&init_data, 0, sizeof(init_data));
1021 	init_data.cid = p_cid->cid;
1022 	init_data.opaque_fid = p_cid->opaque_fid;
1023 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1024 
1025 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1026 				 ETH_RAMROD_RX_QUEUE_STOP,
1027 				 PROTOCOLID_ETH, &init_data);
1028 	if (rc)
1029 		return rc;
1030 
1031 	p_ramrod = &p_ent->ramrod.rx_queue_stop;
1032 	p_ramrod->vport_id = p_cid->abs.vport_id;
1033 	p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
1034 
1035 	/* Cleaning the queue requires the completion to arrive there.
1036 	 * In addition, VFs require the answer to come as eqe to PF.
1037 	 */
1038 	p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
1039 				      !b_eq_completion_only) ||
1040 				     b_cqe_completion;
1041 	p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
1042 				       b_eq_completion_only;
1043 
1044 	return qed_spq_post(p_hwfn, p_ent, NULL);
1045 }
1046 
1047 int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
1048 			  void *p_rxq,
1049 			  bool eq_completion_only, bool cqe_completion)
1050 {
1051 	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
1052 	int rc = -EINVAL;
1053 
1054 	if (IS_PF(p_hwfn->cdev))
1055 		rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
1056 					      eq_completion_only,
1057 					      cqe_completion);
1058 	else
1059 		rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
1060 
1061 	if (!rc)
1062 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1063 	return rc;
1064 }
1065 
1066 int
1067 qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
1068 			 struct qed_queue_cid *p_cid,
1069 			 dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
1070 {
1071 	struct tx_queue_start_ramrod_data *p_ramrod = NULL;
1072 	struct qed_spq_entry *p_ent = NULL;
1073 	struct qed_sp_init_data init_data;
1074 	int rc = -EINVAL;
1075 
1076 	/* Get SPQ entry */
1077 	memset(&init_data, 0, sizeof(init_data));
1078 	init_data.cid = p_cid->cid;
1079 	init_data.opaque_fid = p_cid->opaque_fid;
1080 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1081 
1082 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1083 				 ETH_RAMROD_TX_QUEUE_START,
1084 				 PROTOCOLID_ETH, &init_data);
1085 	if (rc)
1086 		return rc;
1087 
1088 	p_ramrod = &p_ent->ramrod.tx_queue_start;
1089 	p_ramrod->vport_id = p_cid->abs.vport_id;
1090 
1091 	p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
1092 	p_ramrod->sb_index = p_cid->sb_idx;
1093 	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
1094 
1095 	p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
1096 	p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
1097 
1098 	p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1099 	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
1100 
1101 	p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1102 
1103 	return qed_spq_post(p_hwfn, p_ent, NULL);
1104 }
1105 
1106 static int
1107 qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
1108 			  struct qed_queue_cid *p_cid,
1109 			  u8 tc,
1110 			  dma_addr_t pbl_addr,
1111 			  u16 pbl_size, void __iomem **pp_doorbell)
1112 {
1113 	int rc;
1114 
1115 	rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
1116 				      pbl_addr, pbl_size,
1117 				      qed_get_cm_pq_idx_mcos(p_hwfn, tc));
1118 	if (rc)
1119 		return rc;
1120 
1121 	/* Provide the caller with the necessary return values */
1122 	*pp_doorbell = p_hwfn->doorbells +
1123 		       qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
1124 
1125 	return 0;
1126 }
1127 
1128 static int
1129 qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
1130 		       u16 opaque_fid,
1131 		       struct qed_queue_start_common_params *p_params,
1132 		       u8 tc,
1133 		       dma_addr_t pbl_addr,
1134 		       u16 pbl_size,
1135 		       struct qed_txq_start_ret_params *p_ret_params)
1136 {
1137 	struct qed_queue_cid *p_cid;
1138 	int rc;
1139 
1140 	p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
1141 	if (!p_cid)
1142 		return -EINVAL;
1143 
1144 	if (IS_PF(p_hwfn->cdev))
1145 		rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
1146 					       pbl_addr, pbl_size,
1147 					       &p_ret_params->p_doorbell);
1148 	else
1149 		rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
1150 					 pbl_addr, pbl_size,
1151 					 &p_ret_params->p_doorbell);
1152 
1153 	if (rc)
1154 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1155 	else
1156 		p_ret_params->p_handle = (void *)p_cid;
1157 
1158 	return rc;
1159 }
1160 
1161 static int
1162 qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
1163 {
1164 	struct qed_spq_entry *p_ent = NULL;
1165 	struct qed_sp_init_data init_data;
1166 	int rc;
1167 
1168 	memset(&init_data, 0, sizeof(init_data));
1169 	init_data.cid = p_cid->cid;
1170 	init_data.opaque_fid = p_cid->opaque_fid;
1171 	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1172 
1173 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1174 				 ETH_RAMROD_TX_QUEUE_STOP,
1175 				 PROTOCOLID_ETH, &init_data);
1176 	if (rc)
1177 		return rc;
1178 
1179 	return qed_spq_post(p_hwfn, p_ent, NULL);
1180 }
1181 
1182 int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
1183 {
1184 	struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
1185 	int rc;
1186 
1187 	if (IS_PF(p_hwfn->cdev))
1188 		rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
1189 	else
1190 		rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
1191 
1192 	if (!rc)
1193 		qed_eth_queue_cid_release(p_hwfn, p_cid);
1194 	return rc;
1195 }
1196 
1197 static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
1198 {
1199 	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
1200 
1201 	switch (opcode) {
1202 	case QED_FILTER_ADD:
1203 		action = ETH_FILTER_ACTION_ADD;
1204 		break;
1205 	case QED_FILTER_REMOVE:
1206 		action = ETH_FILTER_ACTION_REMOVE;
1207 		break;
1208 	case QED_FILTER_FLUSH:
1209 		action = ETH_FILTER_ACTION_REMOVE_ALL;
1210 		break;
1211 	default:
1212 		action = MAX_ETH_FILTER_ACTION;
1213 	}
1214 
1215 	return action;
1216 }
1217 
1218 static int
1219 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
1220 			u16 opaque_fid,
1221 			struct qed_filter_ucast *p_filter_cmd,
1222 			struct vport_filter_update_ramrod_data **pp_ramrod,
1223 			struct qed_spq_entry **pp_ent,
1224 			enum spq_mode comp_mode,
1225 			struct qed_spq_comp_cb *p_comp_data)
1226 {
1227 	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1228 	struct vport_filter_update_ramrod_data *p_ramrod;
1229 	struct eth_filter_cmd *p_first_filter;
1230 	struct eth_filter_cmd *p_second_filter;
1231 	struct qed_sp_init_data init_data;
1232 	enum eth_filter_action action;
1233 	int rc;
1234 
1235 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1236 			  &vport_to_remove_from);
1237 	if (rc)
1238 		return rc;
1239 
1240 	rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1241 			  &vport_to_add_to);
1242 	if (rc)
1243 		return rc;
1244 
1245 	/* Get SPQ entry */
1246 	memset(&init_data, 0, sizeof(init_data));
1247 	init_data.cid = qed_spq_get_cid(p_hwfn);
1248 	init_data.opaque_fid = opaque_fid;
1249 	init_data.comp_mode = comp_mode;
1250 	init_data.p_comp_data = p_comp_data;
1251 
1252 	rc = qed_sp_init_request(p_hwfn, pp_ent,
1253 				 ETH_RAMROD_FILTERS_UPDATE,
1254 				 PROTOCOLID_ETH, &init_data);
1255 	if (rc)
1256 		return rc;
1257 
1258 	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1259 	p_ramrod = *pp_ramrod;
1260 	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1261 	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1262 
1263 	switch (p_filter_cmd->opcode) {
1264 	case QED_FILTER_REPLACE:
1265 	case QED_FILTER_MOVE:
1266 		p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
1267 	default:
1268 		p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
1269 	}
1270 
1271 	p_first_filter	= &p_ramrod->filter_cmds[0];
1272 	p_second_filter = &p_ramrod->filter_cmds[1];
1273 
1274 	switch (p_filter_cmd->type) {
1275 	case QED_FILTER_MAC:
1276 		p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
1277 	case QED_FILTER_VLAN:
1278 		p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
1279 	case QED_FILTER_MAC_VLAN:
1280 		p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
1281 	case QED_FILTER_INNER_MAC:
1282 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
1283 	case QED_FILTER_INNER_VLAN:
1284 		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
1285 	case QED_FILTER_INNER_PAIR:
1286 		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
1287 	case QED_FILTER_INNER_MAC_VNI_PAIR:
1288 		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1289 		break;
1290 	case QED_FILTER_MAC_VNI_PAIR:
1291 		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
1292 	case QED_FILTER_VNI:
1293 		p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
1294 	}
1295 
1296 	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1297 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1298 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1299 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1300 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1301 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
1302 		qed_set_fw_mac_addr(&p_first_filter->mac_msb,
1303 				    &p_first_filter->mac_mid,
1304 				    &p_first_filter->mac_lsb,
1305 				    (u8 *)p_filter_cmd->mac);
1306 	}
1307 
1308 	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1309 	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1310 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1311 	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1312 		p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
1313 
1314 	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1315 	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1316 	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1317 		p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
1318 
1319 	if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
1320 		p_second_filter->type = p_first_filter->type;
1321 		p_second_filter->mac_msb = p_first_filter->mac_msb;
1322 		p_second_filter->mac_mid = p_first_filter->mac_mid;
1323 		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1324 		p_second_filter->vlan_id = p_first_filter->vlan_id;
1325 		p_second_filter->vni = p_first_filter->vni;
1326 
1327 		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1328 
1329 		p_first_filter->vport_id = vport_to_remove_from;
1330 
1331 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1332 		p_second_filter->vport_id = vport_to_add_to;
1333 	} else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
1334 		p_first_filter->vport_id = vport_to_add_to;
1335 		memcpy(p_second_filter, p_first_filter,
1336 		       sizeof(*p_second_filter));
1337 		p_first_filter->action	= ETH_FILTER_ACTION_REMOVE_ALL;
1338 		p_second_filter->action = ETH_FILTER_ACTION_ADD;
1339 	} else {
1340 		action = qed_filter_action(p_filter_cmd->opcode);
1341 
1342 		if (action == MAX_ETH_FILTER_ACTION) {
1343 			DP_NOTICE(p_hwfn,
1344 				  "%d is not supported yet\n",
1345 				  p_filter_cmd->opcode);
1346 			qed_sp_destroy_request(p_hwfn, *pp_ent);
1347 			return -EINVAL;
1348 		}
1349 
1350 		p_first_filter->action = action;
1351 		p_first_filter->vport_id = (p_filter_cmd->opcode ==
1352 					    QED_FILTER_REMOVE) ?
1353 					   vport_to_remove_from :
1354 					   vport_to_add_to;
1355 	}
1356 
1357 	return 0;
1358 }
1359 
1360 int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
1361 			    u16 opaque_fid,
1362 			    struct qed_filter_ucast *p_filter_cmd,
1363 			    enum spq_mode comp_mode,
1364 			    struct qed_spq_comp_cb *p_comp_data)
1365 {
1366 	struct vport_filter_update_ramrod_data	*p_ramrod	= NULL;
1367 	struct qed_spq_entry			*p_ent		= NULL;
1368 	struct eth_filter_cmd_header		*p_header;
1369 	int					rc;
1370 
1371 	rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1372 				     &p_ramrod, &p_ent,
1373 				     comp_mode, p_comp_data);
1374 	if (rc) {
1375 		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
1376 		return rc;
1377 	}
1378 	p_header = &p_ramrod->filter_cmd_hdr;
1379 	p_header->assert_on_error = p_filter_cmd->assert_on_error;
1380 
1381 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
1382 	if (rc) {
1383 		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
1384 		return rc;
1385 	}
1386 
1387 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1388 		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1389 		   (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1390 		   ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1391 		   "REMOVE" :
1392 		   ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1393 		    "MOVE" : "REPLACE")),
1394 		   (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1395 		   ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1396 		    "VLAN" : "MAC & VLAN"),
1397 		   p_ramrod->filter_cmd_hdr.cmd_cnt,
1398 		   p_filter_cmd->is_rx_filter,
1399 		   p_filter_cmd->is_tx_filter);
1400 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
1401 		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
1402 		   p_filter_cmd->vport_to_add_to,
1403 		   p_filter_cmd->vport_to_remove_from,
1404 		   p_filter_cmd->mac[0],
1405 		   p_filter_cmd->mac[1],
1406 		   p_filter_cmd->mac[2],
1407 		   p_filter_cmd->mac[3],
1408 		   p_filter_cmd->mac[4],
1409 		   p_filter_cmd->mac[5],
1410 		   p_filter_cmd->vlan);
1411 
1412 	return 0;
1413 }
1414 
1415 /*******************************************************************************
1416  * Description:
1417  *         Calculates crc 32 on a buffer
1418  *         Note: crc32_length MUST be aligned to 8
1419  * Return:
1420  ******************************************************************************/
1421 static u32 qed_calc_crc32c(u8 *crc32_packet,
1422 			   u32 crc32_length, u32 crc32_seed, u8 complement)
1423 {
1424 	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1425 	u8 msb = 0, current_byte = 0;
1426 
1427 	if ((!crc32_packet) ||
1428 	    (crc32_length == 0) ||
1429 	    ((crc32_length % 8) != 0))
1430 		return crc32_result;
1431 	for (byte = 0; byte < crc32_length; byte++) {
1432 		current_byte = crc32_packet[byte];
1433 		for (bit = 0; bit < 8; bit++) {
1434 			msb = (u8)(crc32_result >> 31);
1435 			crc32_result = crc32_result << 1;
1436 			if (msb != (0x1 & (current_byte >> bit))) {
1437 				crc32_result = crc32_result ^ CRC32_POLY;
1438 				crc32_result |= 1; /*crc32_result[0] = 1;*/
1439 			}
1440 		}
1441 	}
1442 	return crc32_result;
1443 }
1444 
1445 static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
1446 {
1447 	u32 packet_buf[2] = { 0 };
1448 
1449 	memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1450 	return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1451 }
1452 
1453 u8 qed_mcast_bin_from_mac(u8 *mac)
1454 {
1455 	u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1456 				mac, ETH_ALEN);
1457 
1458 	return crc & 0xff;
1459 }
1460 
1461 static int
1462 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1463 			u16 opaque_fid,
1464 			struct qed_filter_mcast *p_filter_cmd,
1465 			enum spq_mode comp_mode,
1466 			struct qed_spq_comp_cb *p_comp_data)
1467 {
1468 	struct vport_update_ramrod_data *p_ramrod = NULL;
1469 	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1470 	struct qed_spq_entry *p_ent = NULL;
1471 	struct qed_sp_init_data init_data;
1472 	u8 abs_vport_id = 0;
1473 	int rc, i;
1474 
1475 	if (p_filter_cmd->opcode == QED_FILTER_ADD)
1476 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1477 				  &abs_vport_id);
1478 	else
1479 		rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1480 				  &abs_vport_id);
1481 	if (rc)
1482 		return rc;
1483 
1484 	/* Get SPQ entry */
1485 	memset(&init_data, 0, sizeof(init_data));
1486 	init_data.cid = qed_spq_get_cid(p_hwfn);
1487 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1488 	init_data.comp_mode = comp_mode;
1489 	init_data.p_comp_data = p_comp_data;
1490 
1491 	rc = qed_sp_init_request(p_hwfn, &p_ent,
1492 				 ETH_RAMROD_VPORT_UPDATE,
1493 				 PROTOCOLID_ETH, &init_data);
1494 	if (rc) {
1495 		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1496 		return rc;
1497 	}
1498 
1499 	p_ramrod = &p_ent->ramrod.vport_update;
1500 	p_ramrod->common.update_approx_mcast_flg = 1;
1501 
1502 	/* explicitly clear out the entire vector */
1503 	memset(&p_ramrod->approx_mcast.bins, 0,
1504 	       sizeof(p_ramrod->approx_mcast.bins));
1505 	memset(bins, 0, sizeof(bins));
1506 	/* filter ADD op is explicit set op and it removes
1507 	 *  any existing filters for the vport
1508 	 */
1509 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1510 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1511 			u32 bit, nbits;
1512 
1513 			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1514 			nbits = sizeof(u32) * BITS_PER_BYTE;
1515 			bins[bit / nbits] |= 1 << (bit % nbits);
1516 		}
1517 
1518 		/* Convert to correct endianity */
1519 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1520 			struct vport_update_ramrod_mcast *p_ramrod_bins;
1521 
1522 			p_ramrod_bins = &p_ramrod->approx_mcast;
1523 			p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
1524 		}
1525 	}
1526 
1527 	p_ramrod->common.vport_id = abs_vport_id;
1528 
1529 	return qed_spq_post(p_hwfn, p_ent, NULL);
1530 }
1531 
1532 static int qed_filter_mcast_cmd(struct qed_dev *cdev,
1533 				struct qed_filter_mcast *p_filter_cmd,
1534 				enum spq_mode comp_mode,
1535 				struct qed_spq_comp_cb *p_comp_data)
1536 {
1537 	int rc = 0;
1538 	int i;
1539 
1540 	/* only ADD and REMOVE operations are supported for multi-cast */
1541 	if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1542 	     (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
1543 	    (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1544 		return -EINVAL;
1545 
1546 	for_each_hwfn(cdev, i) {
1547 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1548 
1549 		u16 opaque_fid;
1550 
1551 		if (IS_VF(cdev)) {
1552 			qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1553 			continue;
1554 		}
1555 
1556 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1557 
1558 		rc = qed_sp_eth_filter_mcast(p_hwfn,
1559 					     opaque_fid,
1560 					     p_filter_cmd,
1561 					     comp_mode, p_comp_data);
1562 	}
1563 	return rc;
1564 }
1565 
1566 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1567 				struct qed_filter_ucast *p_filter_cmd,
1568 				enum spq_mode comp_mode,
1569 				struct qed_spq_comp_cb *p_comp_data)
1570 {
1571 	int rc = 0;
1572 	int i;
1573 
1574 	for_each_hwfn(cdev, i) {
1575 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1576 		u16 opaque_fid;
1577 
1578 		if (IS_VF(cdev)) {
1579 			rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1580 			continue;
1581 		}
1582 
1583 		opaque_fid = p_hwfn->hw_info.opaque_fid;
1584 
1585 		rc = qed_sp_eth_filter_ucast(p_hwfn,
1586 					     opaque_fid,
1587 					     p_filter_cmd,
1588 					     comp_mode, p_comp_data);
1589 		if (rc)
1590 			break;
1591 	}
1592 
1593 	return rc;
1594 }
1595 
1596 /* Statistics related code */
1597 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1598 					   u32 *p_addr,
1599 					   u32 *p_len, u16 statistics_bin)
1600 {
1601 	if (IS_PF(p_hwfn->cdev)) {
1602 		*p_addr = BAR0_MAP_REG_PSDM_RAM +
1603 		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1604 		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
1605 	} else {
1606 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1607 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1608 
1609 		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1610 		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
1611 	}
1612 }
1613 
1614 static noinline_for_stack void
1615 __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1616 		       struct qed_eth_stats *p_stats, u16 statistics_bin)
1617 {
1618 	struct eth_pstorm_per_queue_stat pstats;
1619 	u32 pstats_addr = 0, pstats_len = 0;
1620 
1621 	__qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1622 				       statistics_bin);
1623 
1624 	memset(&pstats, 0, sizeof(pstats));
1625 	qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1626 
1627 	p_stats->common.tx_ucast_bytes +=
1628 	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1629 	p_stats->common.tx_mcast_bytes +=
1630 	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1631 	p_stats->common.tx_bcast_bytes +=
1632 	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1633 	p_stats->common.tx_ucast_pkts +=
1634 	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1635 	p_stats->common.tx_mcast_pkts +=
1636 	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1637 	p_stats->common.tx_bcast_pkts +=
1638 	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1639 	p_stats->common.tx_err_drop_pkts +=
1640 	    HILO_64_REGPAIR(pstats.error_drop_pkts);
1641 }
1642 
1643 static noinline_for_stack void
1644 __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1645 		       struct qed_eth_stats *p_stats, u16 statistics_bin)
1646 {
1647 	struct tstorm_per_port_stat tstats;
1648 	u32 tstats_addr, tstats_len;
1649 
1650 	if (IS_PF(p_hwfn->cdev)) {
1651 		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1652 		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1653 		tstats_len = sizeof(struct tstorm_per_port_stat);
1654 	} else {
1655 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1656 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1657 
1658 		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1659 		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1660 	}
1661 
1662 	memset(&tstats, 0, sizeof(tstats));
1663 	qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1664 
1665 	p_stats->common.mftag_filter_discards +=
1666 	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
1667 	p_stats->common.mac_filter_discards +=
1668 	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1669 	p_stats->common.gft_filter_drop +=
1670 		HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
1671 }
1672 
1673 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1674 					   u32 *p_addr,
1675 					   u32 *p_len, u16 statistics_bin)
1676 {
1677 	if (IS_PF(p_hwfn->cdev)) {
1678 		*p_addr = BAR0_MAP_REG_USDM_RAM +
1679 		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1680 		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
1681 	} else {
1682 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1683 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1684 
1685 		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1686 		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
1687 	}
1688 }
1689 
1690 static noinline_for_stack
1691 void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1692 			    struct qed_eth_stats *p_stats, u16 statistics_bin)
1693 {
1694 	struct eth_ustorm_per_queue_stat ustats;
1695 	u32 ustats_addr = 0, ustats_len = 0;
1696 
1697 	__qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1698 				       statistics_bin);
1699 
1700 	memset(&ustats, 0, sizeof(ustats));
1701 	qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1702 
1703 	p_stats->common.rx_ucast_bytes +=
1704 	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1705 	p_stats->common.rx_mcast_bytes +=
1706 	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1707 	p_stats->common.rx_bcast_bytes +=
1708 	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1709 	p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1710 	p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1711 	p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1712 }
1713 
1714 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1715 					   u32 *p_addr,
1716 					   u32 *p_len, u16 statistics_bin)
1717 {
1718 	if (IS_PF(p_hwfn->cdev)) {
1719 		*p_addr = BAR0_MAP_REG_MSDM_RAM +
1720 		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1721 		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
1722 	} else {
1723 		struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1724 		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1725 
1726 		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1727 		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
1728 	}
1729 }
1730 
1731 static noinline_for_stack void
1732 __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1733 		       struct qed_eth_stats *p_stats, u16 statistics_bin)
1734 {
1735 	struct eth_mstorm_per_queue_stat mstats;
1736 	u32 mstats_addr = 0, mstats_len = 0;
1737 
1738 	__qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1739 				       statistics_bin);
1740 
1741 	memset(&mstats, 0, sizeof(mstats));
1742 	qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1743 
1744 	p_stats->common.no_buff_discards +=
1745 	    HILO_64_REGPAIR(mstats.no_buff_discard);
1746 	p_stats->common.packet_too_big_discard +=
1747 	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
1748 	p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1749 	p_stats->common.tpa_coalesced_pkts +=
1750 	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1751 	p_stats->common.tpa_coalesced_events +=
1752 	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1753 	p_stats->common.tpa_aborts_num +=
1754 	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
1755 	p_stats->common.tpa_coalesced_bytes +=
1756 	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1757 }
1758 
1759 static noinline_for_stack void
1760 __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
1761 			   struct qed_eth_stats *p_stats)
1762 {
1763 	struct qed_eth_stats_common *p_common = &p_stats->common;
1764 	struct port_stats port_stats;
1765 	int j;
1766 
1767 	memset(&port_stats, 0, sizeof(port_stats));
1768 
1769 	qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1770 			p_hwfn->mcp_info->port_addr +
1771 			offsetof(struct public_port, stats),
1772 			sizeof(port_stats));
1773 
1774 	p_common->rx_64_byte_packets += port_stats.eth.r64;
1775 	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1776 	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1777 	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1778 	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1779 	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1780 	p_common->rx_crc_errors += port_stats.eth.rfcs;
1781 	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1782 	p_common->rx_pause_frames += port_stats.eth.rxpf;
1783 	p_common->rx_pfc_frames += port_stats.eth.rxpp;
1784 	p_common->rx_align_errors += port_stats.eth.raln;
1785 	p_common->rx_carrier_errors += port_stats.eth.rfcr;
1786 	p_common->rx_oversize_packets += port_stats.eth.rovr;
1787 	p_common->rx_jabbers += port_stats.eth.rjbr;
1788 	p_common->rx_undersize_packets += port_stats.eth.rund;
1789 	p_common->rx_fragments += port_stats.eth.rfrg;
1790 	p_common->tx_64_byte_packets += port_stats.eth.t64;
1791 	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1792 	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1793 	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1794 	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1795 	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1796 	p_common->tx_pause_frames += port_stats.eth.txpf;
1797 	p_common->tx_pfc_frames += port_stats.eth.txpp;
1798 	p_common->rx_mac_bytes += port_stats.eth.rbyte;
1799 	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1800 	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1801 	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1802 	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1803 	p_common->tx_mac_bytes += port_stats.eth.tbyte;
1804 	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1805 	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1806 	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1807 	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1808 	for (j = 0; j < 8; j++) {
1809 		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1810 		p_common->brb_discards += port_stats.brb.brb_discard[j];
1811 	}
1812 
1813 	if (QED_IS_BB(p_hwfn->cdev)) {
1814 		struct qed_eth_stats_bb *p_bb = &p_stats->bb;
1815 
1816 		p_bb->rx_1519_to_1522_byte_packets +=
1817 		    port_stats.eth.u0.bb0.r1522;
1818 		p_bb->rx_1519_to_2047_byte_packets +=
1819 		    port_stats.eth.u0.bb0.r2047;
1820 		p_bb->rx_2048_to_4095_byte_packets +=
1821 		    port_stats.eth.u0.bb0.r4095;
1822 		p_bb->rx_4096_to_9216_byte_packets +=
1823 		    port_stats.eth.u0.bb0.r9216;
1824 		p_bb->rx_9217_to_16383_byte_packets +=
1825 		    port_stats.eth.u0.bb0.r16383;
1826 		p_bb->tx_1519_to_2047_byte_packets +=
1827 		    port_stats.eth.u1.bb1.t2047;
1828 		p_bb->tx_2048_to_4095_byte_packets +=
1829 		    port_stats.eth.u1.bb1.t4095;
1830 		p_bb->tx_4096_to_9216_byte_packets +=
1831 		    port_stats.eth.u1.bb1.t9216;
1832 		p_bb->tx_9217_to_16383_byte_packets +=
1833 		    port_stats.eth.u1.bb1.t16383;
1834 		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1835 		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1836 	} else {
1837 		struct qed_eth_stats_ah *p_ah = &p_stats->ah;
1838 
1839 		p_ah->rx_1519_to_max_byte_packets +=
1840 		    port_stats.eth.u0.ah0.r1519_to_max;
1841 		p_ah->tx_1519_to_max_byte_packets =
1842 		    port_stats.eth.u1.ah1.t1519_to_max;
1843 	}
1844 
1845 	p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
1846 					     p_hwfn->mcp_info->port_addr +
1847 					     offsetof(struct public_port,
1848 						      link_change_count));
1849 }
1850 
1851 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1852 				  struct qed_ptt *p_ptt,
1853 				  struct qed_eth_stats *stats,
1854 				  u16 statistics_bin, bool b_get_port_stats)
1855 {
1856 	__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1857 	__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1858 	__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1859 	__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1860 
1861 	if (b_get_port_stats && p_hwfn->mcp_info)
1862 		__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1863 }
1864 
1865 static void _qed_get_vport_stats(struct qed_dev *cdev,
1866 				 struct qed_eth_stats *stats)
1867 {
1868 	u8 fw_vport = 0;
1869 	int i;
1870 
1871 	memset(stats, 0, sizeof(*stats));
1872 
1873 	for_each_hwfn(cdev, i) {
1874 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1875 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1876 						    :  NULL;
1877 		bool b_get_port_stats;
1878 
1879 		if (IS_PF(cdev)) {
1880 			/* The main vport index is relative first */
1881 			if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1882 				DP_ERR(p_hwfn, "No vport available!\n");
1883 				goto out;
1884 			}
1885 		}
1886 
1887 		if (IS_PF(cdev) && !p_ptt) {
1888 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1889 			continue;
1890 		}
1891 
1892 		b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn);
1893 		__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1894 				      b_get_port_stats);
1895 
1896 out:
1897 		if (IS_PF(cdev) && p_ptt)
1898 			qed_ptt_release(p_hwfn, p_ptt);
1899 	}
1900 }
1901 
1902 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
1903 {
1904 	u32 i;
1905 
1906 	if (!cdev) {
1907 		memset(stats, 0, sizeof(*stats));
1908 		return;
1909 	}
1910 
1911 	_qed_get_vport_stats(cdev, stats);
1912 
1913 	if (!cdev->reset_stats)
1914 		return;
1915 
1916 	/* Reduce the statistics baseline */
1917 	for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1918 		((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1919 }
1920 
1921 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
1922 void qed_reset_vport_stats(struct qed_dev *cdev)
1923 {
1924 	int i;
1925 
1926 	for_each_hwfn(cdev, i) {
1927 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1928 		struct eth_mstorm_per_queue_stat mstats;
1929 		struct eth_ustorm_per_queue_stat ustats;
1930 		struct eth_pstorm_per_queue_stat pstats;
1931 		struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1932 						    : NULL;
1933 		u32 addr = 0, len = 0;
1934 
1935 		if (IS_PF(cdev) && !p_ptt) {
1936 			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1937 			continue;
1938 		}
1939 
1940 		memset(&mstats, 0, sizeof(mstats));
1941 		__qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1942 		qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1943 
1944 		memset(&ustats, 0, sizeof(ustats));
1945 		__qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1946 		qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1947 
1948 		memset(&pstats, 0, sizeof(pstats));
1949 		__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1950 		qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1951 
1952 		if (IS_PF(cdev))
1953 			qed_ptt_release(p_hwfn, p_ptt);
1954 	}
1955 
1956 	/* PORT statistics are not necessarily reset, so we need to
1957 	 * read and create a baseline for future statistics.
1958 	 * Link change stat is maintained by MFW, return its value as is.
1959 	 */
1960 	if (!cdev->reset_stats) {
1961 		DP_INFO(cdev, "Reset stats not allocated\n");
1962 	} else {
1963 		_qed_get_vport_stats(cdev, cdev->reset_stats);
1964 		cdev->reset_stats->common.link_change_count = 0;
1965 	}
1966 }
1967 
1968 static enum gft_profile_type
1969 qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
1970 {
1971 	if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
1972 		return GFT_PROFILE_TYPE_4_TUPLE;
1973 	if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
1974 		return GFT_PROFILE_TYPE_IP_DST_ADDR;
1975 	if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
1976 		return GFT_PROFILE_TYPE_IP_SRC_ADDR;
1977 	return GFT_PROFILE_TYPE_L4_DST_PORT;
1978 }
1979 
1980 void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
1981 			     struct qed_ptt *p_ptt,
1982 			     struct qed_arfs_config_params *p_cfg_params)
1983 {
1984 	if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits))
1985 		return;
1986 
1987 	if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
1988 		qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1989 			       p_cfg_params->tcp,
1990 			       p_cfg_params->udp,
1991 			       p_cfg_params->ipv4,
1992 			       p_cfg_params->ipv6,
1993 			       qed_arfs_mode_to_hsi(p_cfg_params->mode));
1994 		DP_VERBOSE(p_hwfn,
1995 			   QED_MSG_SP,
1996 			   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
1997 			   p_cfg_params->tcp ? "Enable" : "Disable",
1998 			   p_cfg_params->udp ? "Enable" : "Disable",
1999 			   p_cfg_params->ipv4 ? "Enable" : "Disable",
2000 			   p_cfg_params->ipv6 ? "Enable" : "Disable",
2001 			   (u32)p_cfg_params->mode);
2002 	} else {
2003 		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
2004 		qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2005 	}
2006 }
2007 
2008 int
2009 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2010 				struct qed_spq_comp_cb *p_cb,
2011 				struct qed_ntuple_filter_params *p_params)
2012 {
2013 	struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
2014 	struct qed_spq_entry *p_ent = NULL;
2015 	struct qed_sp_init_data init_data;
2016 	u16 abs_rx_q_id = 0;
2017 	u8 abs_vport_id = 0;
2018 	int rc = -EINVAL;
2019 
2020 	/* Get SPQ entry */
2021 	memset(&init_data, 0, sizeof(init_data));
2022 	init_data.cid = qed_spq_get_cid(p_hwfn);
2023 
2024 	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2025 
2026 	if (p_cb) {
2027 		init_data.comp_mode = QED_SPQ_MODE_CB;
2028 		init_data.p_comp_data = p_cb;
2029 	} else {
2030 		init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2031 	}
2032 
2033 	rc = qed_sp_init_request(p_hwfn, &p_ent,
2034 				 ETH_RAMROD_RX_UPDATE_GFT_FILTER,
2035 				 PROTOCOLID_ETH, &init_data);
2036 	if (rc)
2037 		return rc;
2038 
2039 	p_ramrod = &p_ent->ramrod.rx_update_gft;
2040 
2041 	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
2042 	p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
2043 
2044 	if (p_params->b_is_drop) {
2045 		p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
2046 	} else {
2047 		rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2048 		if (rc)
2049 			goto err;
2050 
2051 		if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2052 			rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
2053 					     &abs_rx_q_id);
2054 			if (rc)
2055 				goto err;
2056 
2057 			p_ramrod->rx_qid_valid = 1;
2058 			p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
2059 		}
2060 
2061 		p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
2062 	}
2063 
2064 	p_ramrod->flow_id_valid = 0;
2065 	p_ramrod->flow_id = 0;
2066 	p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
2067 	    : GFT_DELETE_FILTER;
2068 
2069 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
2070 		   "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2071 		   abs_vport_id, abs_rx_q_id,
2072 		   p_params->b_is_add ? "Adding" : "Removing",
2073 		   (u64)p_params->addr, p_params->length);
2074 
2075 	return qed_spq_post(p_hwfn, p_ent, NULL);
2076 
2077 err:
2078 	qed_sp_destroy_request(p_hwfn, p_ent);
2079 	return rc;
2080 }
2081 
2082 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
2083 			 struct qed_ptt *p_ptt,
2084 			 struct qed_queue_cid *p_cid, u16 *p_rx_coal)
2085 {
2086 	u32 coalesce, address, is_valid;
2087 	struct cau_sb_entry sb_entry;
2088 	u8 timer_res;
2089 	int rc;
2090 
2091 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2092 			       p_cid->sb_igu_id * sizeof(u64),
2093 			       (u64)(uintptr_t)&sb_entry, 2, NULL);
2094 	if (rc) {
2095 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2096 		return rc;
2097 	}
2098 
2099 	timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
2100 			      CAU_SB_ENTRY_TIMER_RES0);
2101 
2102 	address = BAR0_MAP_REG_USDM_RAM +
2103 		  USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
2104 	coalesce = qed_rd(p_hwfn, p_ptt, address);
2105 
2106 	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2107 	if (!is_valid)
2108 		return -EINVAL;
2109 
2110 	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2111 	*p_rx_coal = (u16)(coalesce << timer_res);
2112 
2113 	return 0;
2114 }
2115 
2116 int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
2117 			 struct qed_ptt *p_ptt,
2118 			 struct qed_queue_cid *p_cid, u16 *p_tx_coal)
2119 {
2120 	u32 coalesce, address, is_valid;
2121 	struct cau_sb_entry sb_entry;
2122 	u8 timer_res;
2123 	int rc;
2124 
2125 	rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2126 			       p_cid->sb_igu_id * sizeof(u64),
2127 			       (u64)(uintptr_t)&sb_entry, 2, NULL);
2128 	if (rc) {
2129 		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2130 		return rc;
2131 	}
2132 
2133 	timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
2134 			      CAU_SB_ENTRY_TIMER_RES1);
2135 
2136 	address = BAR0_MAP_REG_XSDM_RAM +
2137 		  XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
2138 	coalesce = qed_rd(p_hwfn, p_ptt, address);
2139 
2140 	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2141 	if (!is_valid)
2142 		return -EINVAL;
2143 
2144 	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2145 	*p_tx_coal = (u16)(coalesce << timer_res);
2146 
2147 	return 0;
2148 }
2149 
2150 int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
2151 {
2152 	struct qed_queue_cid *p_cid = handle;
2153 	struct qed_ptt *p_ptt;
2154 	int rc = 0;
2155 
2156 	if (IS_VF(p_hwfn->cdev)) {
2157 		rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2158 		if (rc)
2159 			DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2160 
2161 		return rc;
2162 	}
2163 
2164 	p_ptt = qed_ptt_acquire(p_hwfn);
2165 	if (!p_ptt)
2166 		return -EAGAIN;
2167 
2168 	if (p_cid->b_is_rx) {
2169 		rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2170 		if (rc)
2171 			goto out;
2172 	} else {
2173 		rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2174 		if (rc)
2175 			goto out;
2176 	}
2177 
2178 out:
2179 	qed_ptt_release(p_hwfn, p_ptt);
2180 
2181 	return rc;
2182 }
2183 
2184 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2185 				 struct qed_dev_eth_info *info)
2186 {
2187 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2188 	int i;
2189 
2190 	memset(info, 0, sizeof(*info));
2191 
2192 	if (IS_PF(cdev)) {
2193 		int max_vf_vlan_filters = 0;
2194 		int max_vf_mac_filters = 0;
2195 
2196 		info->num_tc = p_hwfn->hw_info.num_hw_tc;
2197 
2198 		if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
2199 			u16 num_queues = 0;
2200 
2201 			/* Since the feature controls only queue-zones,
2202 			 * make sure we have the contexts [rx, xdp, tcs] to
2203 			 * match.
2204 			 */
2205 			for_each_hwfn(cdev, i) {
2206 				struct qed_hwfn *hwfn = &cdev->hwfns[i];
2207 				u16 l2_queues = (u16)FEAT_NUM(hwfn,
2208 							      QED_PF_L2_QUE);
2209 				u16 cids;
2210 
2211 				cids = hwfn->pf_params.eth_pf_params.num_cons;
2212 				cids /= (2 + info->num_tc);
2213 				num_queues += min_t(u16, l2_queues, cids);
2214 			}
2215 
2216 			/* queues might theoretically be >256, but interrupts'
2217 			 * upper-limit guarantes that it would fit in a u8.
2218 			 */
2219 			if (cdev->int_params.fp_msix_cnt) {
2220 				u8 irqs = cdev->int_params.fp_msix_cnt;
2221 
2222 				info->num_queues = (u8)min_t(u16,
2223 							     num_queues, irqs);
2224 			}
2225 		} else {
2226 			info->num_queues = cdev->num_hwfns;
2227 		}
2228 
2229 		if (IS_QED_SRIOV(cdev)) {
2230 			max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
2231 					      QED_ETH_VF_NUM_VLAN_FILTERS;
2232 			max_vf_mac_filters = cdev->p_iov_info->total_vfs *
2233 					     QED_ETH_VF_NUM_MAC_FILTERS;
2234 		}
2235 		info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2236 						  QED_VLAN) -
2237 					 max_vf_vlan_filters;
2238 		info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2239 						 QED_MAC) -
2240 					max_vf_mac_filters;
2241 
2242 		ether_addr_copy(info->port_mac,
2243 				cdev->hwfns[0].hw_info.hw_mac_addr);
2244 
2245 		info->xdp_supported = true;
2246 	} else {
2247 		u16 total_cids = 0;
2248 
2249 		info->num_tc = 1;
2250 
2251 		/* Determine queues &  XDP support */
2252 		for_each_hwfn(cdev, i) {
2253 			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2254 			u8 queues, cids;
2255 
2256 			qed_vf_get_num_cids(p_hwfn, &cids);
2257 			qed_vf_get_num_rxqs(p_hwfn, &queues);
2258 			info->num_queues += queues;
2259 			total_cids += cids;
2260 		}
2261 
2262 		/* Enable VF XDP in case PF guarntees sufficient connections */
2263 		if (total_cids >= info->num_queues * 3)
2264 			info->xdp_supported = true;
2265 
2266 		qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
2267 					    (u8 *)&info->num_vlan_filters);
2268 		qed_vf_get_num_mac_filters(&cdev->hwfns[0],
2269 					   (u8 *)&info->num_mac_filters);
2270 		qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
2271 
2272 		info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
2273 	}
2274 
2275 	qed_fill_dev_info(cdev, &info->common);
2276 
2277 	if (IS_VF(cdev))
2278 		eth_zero_addr(info->common.hw_mac);
2279 
2280 	return 0;
2281 }
2282 
2283 static void qed_register_eth_ops(struct qed_dev *cdev,
2284 				 struct qed_eth_cb_ops *ops, void *cookie)
2285 {
2286 	cdev->protocol_ops.eth = ops;
2287 	cdev->ops_cookie = cookie;
2288 
2289 	/* For VF, we start bulletin reading */
2290 	if (IS_VF(cdev))
2291 		qed_vf_start_iov_wq(cdev);
2292 }
2293 
2294 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
2295 {
2296 	if (IS_PF(cdev))
2297 		return true;
2298 
2299 	return qed_vf_check_mac(&cdev->hwfns[0], mac);
2300 }
2301 
2302 static int qed_start_vport(struct qed_dev *cdev,
2303 			   struct qed_start_vport_params *params)
2304 {
2305 	int rc, i;
2306 
2307 	for_each_hwfn(cdev, i) {
2308 		struct qed_sp_vport_start_params start = { 0 };
2309 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2310 
2311 		start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
2312 							QED_TPA_MODE_NONE;
2313 		start.remove_inner_vlan = params->remove_inner_vlan;
2314 		start.only_untagged = true;	/* untagged only */
2315 		start.drop_ttl0 = params->drop_ttl0;
2316 		start.opaque_fid = p_hwfn->hw_info.opaque_fid;
2317 		start.concrete_fid = p_hwfn->hw_info.concrete_fid;
2318 		start.handle_ptp_pkts = params->handle_ptp_pkts;
2319 		start.vport_id = params->vport_id;
2320 		start.max_buffers_per_cqe = 16;
2321 		start.mtu = params->mtu;
2322 
2323 		rc = qed_sp_vport_start(p_hwfn, &start);
2324 		if (rc) {
2325 			DP_ERR(cdev, "Failed to start VPORT\n");
2326 			return rc;
2327 		}
2328 
2329 		rc = qed_hw_start_fastpath(p_hwfn);
2330 		if (rc) {
2331 			DP_ERR(cdev, "Failed to start VPORT fastpath\n");
2332 			return rc;
2333 		}
2334 
2335 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2336 			   "Started V-PORT %d with MTU %d\n",
2337 			   start.vport_id, start.mtu);
2338 	}
2339 
2340 	if (params->clear_stats)
2341 		qed_reset_vport_stats(cdev);
2342 
2343 	return 0;
2344 }
2345 
2346 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
2347 {
2348 	int rc, i;
2349 
2350 	for_each_hwfn(cdev, i) {
2351 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2352 
2353 		rc = qed_sp_vport_stop(p_hwfn,
2354 				       p_hwfn->hw_info.opaque_fid, vport_id);
2355 
2356 		if (rc) {
2357 			DP_ERR(cdev, "Failed to stop VPORT\n");
2358 			return rc;
2359 		}
2360 	}
2361 	return 0;
2362 }
2363 
2364 static int qed_update_vport_rss(struct qed_dev *cdev,
2365 				struct qed_update_vport_rss_params *input,
2366 				struct qed_rss_params *rss)
2367 {
2368 	int i, fn;
2369 
2370 	/* Update configuration with what's correct regardless of CMT */
2371 	rss->update_rss_config = 1;
2372 	rss->rss_enable = 1;
2373 	rss->update_rss_capabilities = 1;
2374 	rss->update_rss_ind_table = 1;
2375 	rss->update_rss_key = 1;
2376 	rss->rss_caps = input->rss_caps;
2377 	memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
2378 
2379 	/* In regular scenario, we'd simply need to take input handlers.
2380 	 * But in CMT, we'd have to split the handlers according to the
2381 	 * engine they were configured on. We'd then have to understand
2382 	 * whether RSS is really required, since 2-queues on CMT doesn't
2383 	 * require RSS.
2384 	 */
2385 	if (cdev->num_hwfns == 1) {
2386 		memcpy(rss->rss_ind_table,
2387 		       input->rss_ind_table,
2388 		       QED_RSS_IND_TABLE_SIZE * sizeof(void *));
2389 		rss->rss_table_size_log = 7;
2390 		return 0;
2391 	}
2392 
2393 	/* Start by copying the non-spcific information to the 2nd copy */
2394 	memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
2395 
2396 	/* CMT should be round-robin */
2397 	for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
2398 		struct qed_queue_cid *cid = input->rss_ind_table[i];
2399 		struct qed_rss_params *t_rss;
2400 
2401 		if (cid->p_owner == QED_LEADING_HWFN(cdev))
2402 			t_rss = &rss[0];
2403 		else
2404 			t_rss = &rss[1];
2405 
2406 		t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
2407 	}
2408 
2409 	/* Make sure RSS is actually required */
2410 	for_each_hwfn(cdev, fn) {
2411 		for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
2412 			if (rss[fn].rss_ind_table[i] !=
2413 			    rss[fn].rss_ind_table[0])
2414 				break;
2415 		}
2416 		if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
2417 			DP_VERBOSE(cdev, NETIF_MSG_IFUP,
2418 				   "CMT - 1 queue per-hwfn; Disabling RSS\n");
2419 			return -EINVAL;
2420 		}
2421 		rss[fn].rss_table_size_log = 6;
2422 	}
2423 
2424 	return 0;
2425 }
2426 
2427 static int qed_update_vport(struct qed_dev *cdev,
2428 			    struct qed_update_vport_params *params)
2429 {
2430 	struct qed_sp_vport_update_params sp_params;
2431 	struct qed_rss_params *rss;
2432 	int rc = 0, i;
2433 
2434 	if (!cdev)
2435 		return -ENODEV;
2436 
2437 	rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns));
2438 	if (!rss)
2439 		return -ENOMEM;
2440 
2441 	memset(&sp_params, 0, sizeof(sp_params));
2442 
2443 	/* Translate protocol params into sp params */
2444 	sp_params.vport_id = params->vport_id;
2445 	sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
2446 	sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
2447 	sp_params.vport_active_rx_flg = params->vport_active_flg;
2448 	sp_params.vport_active_tx_flg = params->vport_active_flg;
2449 	sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
2450 	sp_params.tx_switching_flg = params->tx_switching_flg;
2451 	sp_params.accept_any_vlan = params->accept_any_vlan;
2452 	sp_params.update_accept_any_vlan_flg =
2453 		params->update_accept_any_vlan_flg;
2454 
2455 	/* Prepare the RSS configuration */
2456 	if (params->update_rss_flg)
2457 		if (qed_update_vport_rss(cdev, &params->rss_params, rss))
2458 			params->update_rss_flg = 0;
2459 
2460 	for_each_hwfn(cdev, i) {
2461 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2462 
2463 		if (params->update_rss_flg)
2464 			sp_params.rss_params = &rss[i];
2465 
2466 		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2467 		rc = qed_sp_vport_update(p_hwfn, &sp_params,
2468 					 QED_SPQ_MODE_EBLOCK,
2469 					 NULL);
2470 		if (rc) {
2471 			DP_ERR(cdev, "Failed to update VPORT\n");
2472 			goto out;
2473 		}
2474 
2475 		DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2476 			   "Updated V-PORT %d: active_flag %d [update %d]\n",
2477 			   params->vport_id, params->vport_active_flg,
2478 			   params->update_vport_active_flg);
2479 	}
2480 
2481 out:
2482 	vfree(rss);
2483 	return rc;
2484 }
2485 
2486 static int qed_start_rxq(struct qed_dev *cdev,
2487 			 u8 rss_num,
2488 			 struct qed_queue_start_common_params *p_params,
2489 			 u16 bd_max_bytes,
2490 			 dma_addr_t bd_chain_phys_addr,
2491 			 dma_addr_t cqe_pbl_addr,
2492 			 u16 cqe_pbl_size,
2493 			 struct qed_rxq_start_ret_params *ret_params)
2494 {
2495 	struct qed_hwfn *p_hwfn;
2496 	int rc, hwfn_index;
2497 
2498 	hwfn_index = rss_num % cdev->num_hwfns;
2499 	p_hwfn = &cdev->hwfns[hwfn_index];
2500 
2501 	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2502 	p_params->stats_id = p_params->vport_id;
2503 
2504 	rc = qed_eth_rx_queue_start(p_hwfn,
2505 				    p_hwfn->hw_info.opaque_fid,
2506 				    p_params,
2507 				    bd_max_bytes,
2508 				    bd_chain_phys_addr,
2509 				    cqe_pbl_addr, cqe_pbl_size, ret_params);
2510 	if (rc) {
2511 		DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
2512 		return rc;
2513 	}
2514 
2515 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2516 		   "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2517 		   p_params->queue_id, rss_num, p_params->vport_id,
2518 		   p_params->p_sb->igu_sb_id);
2519 
2520 	return 0;
2521 }
2522 
2523 static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
2524 {
2525 	int rc, hwfn_index;
2526 	struct qed_hwfn *p_hwfn;
2527 
2528 	hwfn_index = rss_id % cdev->num_hwfns;
2529 	p_hwfn = &cdev->hwfns[hwfn_index];
2530 
2531 	rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
2532 	if (rc) {
2533 		DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
2534 		return rc;
2535 	}
2536 
2537 	return 0;
2538 }
2539 
2540 static int qed_start_txq(struct qed_dev *cdev,
2541 			 u8 rss_num,
2542 			 struct qed_queue_start_common_params *p_params,
2543 			 dma_addr_t pbl_addr,
2544 			 u16 pbl_size,
2545 			 struct qed_txq_start_ret_params *ret_params)
2546 {
2547 	struct qed_hwfn *p_hwfn;
2548 	int rc, hwfn_index;
2549 
2550 	hwfn_index = rss_num % cdev->num_hwfns;
2551 	p_hwfn = &cdev->hwfns[hwfn_index];
2552 	p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2553 	p_params->stats_id = p_params->vport_id;
2554 
2555 	rc = qed_eth_tx_queue_start(p_hwfn,
2556 				    p_hwfn->hw_info.opaque_fid,
2557 				    p_params, p_params->tc,
2558 				    pbl_addr, pbl_size, ret_params);
2559 
2560 	if (rc) {
2561 		DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
2562 		return rc;
2563 	}
2564 
2565 	DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2566 		   "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2567 		   p_params->queue_id, rss_num, p_params->vport_id,
2568 		   p_params->p_sb->igu_sb_id);
2569 
2570 	return 0;
2571 }
2572 
2573 #define QED_HW_STOP_RETRY_LIMIT (10)
2574 static int qed_fastpath_stop(struct qed_dev *cdev)
2575 {
2576 	int rc;
2577 
2578 	rc = qed_hw_stop_fastpath(cdev);
2579 	if (rc) {
2580 		DP_ERR(cdev, "Failed to stop Fastpath\n");
2581 		return rc;
2582 	}
2583 
2584 	return 0;
2585 }
2586 
2587 static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
2588 {
2589 	struct qed_hwfn *p_hwfn;
2590 	int rc, hwfn_index;
2591 
2592 	hwfn_index = rss_id % cdev->num_hwfns;
2593 	p_hwfn = &cdev->hwfns[hwfn_index];
2594 
2595 	rc = qed_eth_tx_queue_stop(p_hwfn, handle);
2596 	if (rc) {
2597 		DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
2598 		return rc;
2599 	}
2600 
2601 	return 0;
2602 }
2603 
2604 static int qed_tunn_configure(struct qed_dev *cdev,
2605 			      struct qed_tunn_params *tunn_params)
2606 {
2607 	struct qed_tunnel_info tunn_info;
2608 	int i, rc;
2609 
2610 	memset(&tunn_info, 0, sizeof(tunn_info));
2611 	if (tunn_params->update_vxlan_port) {
2612 		tunn_info.vxlan_port.b_update_port = true;
2613 		tunn_info.vxlan_port.port = tunn_params->vxlan_port;
2614 	}
2615 
2616 	if (tunn_params->update_geneve_port) {
2617 		tunn_info.geneve_port.b_update_port = true;
2618 		tunn_info.geneve_port.port = tunn_params->geneve_port;
2619 	}
2620 
2621 	for_each_hwfn(cdev, i) {
2622 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
2623 		struct qed_ptt *p_ptt;
2624 		struct qed_tunnel_info *tun;
2625 
2626 		tun = &hwfn->cdev->tunnel;
2627 		if (IS_PF(cdev)) {
2628 			p_ptt = qed_ptt_acquire(hwfn);
2629 			if (!p_ptt)
2630 				return -EAGAIN;
2631 		} else {
2632 			p_ptt = NULL;
2633 		}
2634 
2635 		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
2636 					       QED_SPQ_MODE_EBLOCK, NULL);
2637 		if (rc) {
2638 			if (IS_PF(cdev))
2639 				qed_ptt_release(hwfn, p_ptt);
2640 			return rc;
2641 		}
2642 
2643 		if (IS_PF_SRIOV(hwfn)) {
2644 			u16 vxlan_port, geneve_port;
2645 			int j;
2646 
2647 			vxlan_port = tun->vxlan_port.port;
2648 			geneve_port = tun->geneve_port.port;
2649 
2650 			qed_for_each_vf(hwfn, j) {
2651 				qed_iov_bulletin_set_udp_ports(hwfn, j,
2652 							       vxlan_port,
2653 							       geneve_port);
2654 			}
2655 
2656 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2657 		}
2658 		if (IS_PF(cdev))
2659 			qed_ptt_release(hwfn, p_ptt);
2660 	}
2661 
2662 	return 0;
2663 }
2664 
2665 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2666 					enum qed_filter_rx_mode_type type)
2667 {
2668 	struct qed_filter_accept_flags accept_flags;
2669 
2670 	memset(&accept_flags, 0, sizeof(accept_flags));
2671 
2672 	accept_flags.update_rx_mode_config = 1;
2673 	accept_flags.update_tx_mode_config = 1;
2674 	accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2675 					QED_ACCEPT_MCAST_MATCHED |
2676 					QED_ACCEPT_BCAST;
2677 	accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2678 					QED_ACCEPT_MCAST_MATCHED |
2679 					QED_ACCEPT_BCAST;
2680 
2681 	if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2682 		accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2683 						 QED_ACCEPT_MCAST_UNMATCHED;
2684 		accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2685 						 QED_ACCEPT_MCAST_UNMATCHED;
2686 	} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2687 		accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2688 		accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2689 	}
2690 
2691 	return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
2692 				     QED_SPQ_MODE_CB, NULL);
2693 }
2694 
2695 static int qed_configure_filter_ucast(struct qed_dev *cdev,
2696 				      struct qed_filter_ucast_params *params)
2697 {
2698 	struct qed_filter_ucast ucast;
2699 
2700 	if (!params->vlan_valid && !params->mac_valid) {
2701 		DP_NOTICE(cdev,
2702 			  "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
2703 		return -EINVAL;
2704 	}
2705 
2706 	memset(&ucast, 0, sizeof(ucast));
2707 	switch (params->type) {
2708 	case QED_FILTER_XCAST_TYPE_ADD:
2709 		ucast.opcode = QED_FILTER_ADD;
2710 		break;
2711 	case QED_FILTER_XCAST_TYPE_DEL:
2712 		ucast.opcode = QED_FILTER_REMOVE;
2713 		break;
2714 	case QED_FILTER_XCAST_TYPE_REPLACE:
2715 		ucast.opcode = QED_FILTER_REPLACE;
2716 		break;
2717 	default:
2718 		DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
2719 			  params->type);
2720 	}
2721 
2722 	if (params->vlan_valid && params->mac_valid) {
2723 		ucast.type = QED_FILTER_MAC_VLAN;
2724 		ether_addr_copy(ucast.mac, params->mac);
2725 		ucast.vlan = params->vlan;
2726 	} else if (params->mac_valid) {
2727 		ucast.type = QED_FILTER_MAC;
2728 		ether_addr_copy(ucast.mac, params->mac);
2729 	} else {
2730 		ucast.type = QED_FILTER_VLAN;
2731 		ucast.vlan = params->vlan;
2732 	}
2733 
2734 	ucast.is_rx_filter = true;
2735 	ucast.is_tx_filter = true;
2736 
2737 	return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2738 }
2739 
2740 static int qed_configure_filter_mcast(struct qed_dev *cdev,
2741 				      struct qed_filter_mcast_params *params)
2742 {
2743 	struct qed_filter_mcast mcast;
2744 	int i;
2745 
2746 	memset(&mcast, 0, sizeof(mcast));
2747 	switch (params->type) {
2748 	case QED_FILTER_XCAST_TYPE_ADD:
2749 		mcast.opcode = QED_FILTER_ADD;
2750 		break;
2751 	case QED_FILTER_XCAST_TYPE_DEL:
2752 		mcast.opcode = QED_FILTER_REMOVE;
2753 		break;
2754 	default:
2755 		DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
2756 			  params->type);
2757 	}
2758 
2759 	mcast.num_mc_addrs = params->num;
2760 	for (i = 0; i < mcast.num_mc_addrs; i++)
2761 		ether_addr_copy(mcast.mac[i], params->mac[i]);
2762 
2763 	return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
2764 }
2765 
2766 static int qed_configure_arfs_searcher(struct qed_dev *cdev,
2767 				       enum qed_filter_config_mode mode)
2768 {
2769 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2770 	struct qed_arfs_config_params arfs_config_params;
2771 
2772 	memset(&arfs_config_params, 0, sizeof(arfs_config_params));
2773 	arfs_config_params.tcp = true;
2774 	arfs_config_params.udp = true;
2775 	arfs_config_params.ipv4 = true;
2776 	arfs_config_params.ipv6 = true;
2777 	arfs_config_params.mode = mode;
2778 	qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
2779 				&arfs_config_params);
2780 	return 0;
2781 }
2782 
2783 static void
2784 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
2785 			     void *cookie,
2786 			     union event_ring_data *data, u8 fw_return_code)
2787 {
2788 	struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
2789 	void *dev = p_hwfn->cdev->ops_cookie;
2790 
2791 	op->arfs_filter_op(dev, cookie, fw_return_code);
2792 }
2793 
2794 static int
2795 qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
2796 			      void *cookie,
2797 			      struct qed_ntuple_filter_params *params)
2798 {
2799 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2800 	struct qed_spq_comp_cb cb;
2801 	int rc = -EINVAL;
2802 
2803 	cb.function = qed_arfs_sp_response_handler;
2804 	cb.cookie = cookie;
2805 
2806 	if (params->b_is_vf) {
2807 		if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
2808 					   false)) {
2809 			DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
2810 				params->vf_id);
2811 			return rc;
2812 		}
2813 
2814 		params->vport_id = params->vf_id + 1;
2815 		params->qid = QED_RFS_NTUPLE_QID_RSS;
2816 	}
2817 
2818 	rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
2819 	if (rc)
2820 		DP_NOTICE(p_hwfn,
2821 			  "Failed to issue a-RFS filter configuration\n");
2822 	else
2823 		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
2824 			   "Successfully issued a-RFS filter configuration\n");
2825 
2826 	return rc;
2827 }
2828 
2829 static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
2830 {
2831 	struct qed_queue_cid *p_cid = handle;
2832 	struct qed_hwfn *p_hwfn;
2833 	int rc;
2834 
2835 	p_hwfn = p_cid->p_owner;
2836 	rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
2837 	if (rc)
2838 		DP_VERBOSE(cdev, QED_MSG_DEBUG,
2839 			   "Unable to read queue coalescing\n");
2840 
2841 	return rc;
2842 }
2843 
2844 static int qed_fp_cqe_completion(struct qed_dev *dev,
2845 				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
2846 {
2847 	return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2848 				      cqe);
2849 }
2850 
2851 static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
2852 {
2853 	int i, ret;
2854 
2855 	if (IS_PF(cdev))
2856 		return 0;
2857 
2858 	for_each_hwfn(cdev, i) {
2859 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2860 
2861 		ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
2862 		if (ret)
2863 			return ret;
2864 	}
2865 
2866 	return 0;
2867 }
2868 
2869 static const struct qed_eth_ops qed_eth_ops_pass = {
2870 	.common = &qed_common_ops_pass,
2871 #ifdef CONFIG_QED_SRIOV
2872 	.iov = &qed_iov_ops_pass,
2873 #endif
2874 #ifdef CONFIG_DCB
2875 	.dcb = &qed_dcbnl_ops_pass,
2876 #endif
2877 	.ptp = &qed_ptp_ops_pass,
2878 	.fill_dev_info = &qed_fill_eth_dev_info,
2879 	.register_ops = &qed_register_eth_ops,
2880 	.check_mac = &qed_check_mac,
2881 	.vport_start = &qed_start_vport,
2882 	.vport_stop = &qed_stop_vport,
2883 	.vport_update = &qed_update_vport,
2884 	.q_rx_start = &qed_start_rxq,
2885 	.q_rx_stop = &qed_stop_rxq,
2886 	.q_tx_start = &qed_start_txq,
2887 	.q_tx_stop = &qed_stop_txq,
2888 	.filter_config_rx_mode = &qed_configure_filter_rx_mode,
2889 	.filter_config_ucast = &qed_configure_filter_ucast,
2890 	.filter_config_mcast = &qed_configure_filter_mcast,
2891 	.fastpath_stop = &qed_fastpath_stop,
2892 	.eth_cqe_completion = &qed_fp_cqe_completion,
2893 	.get_vport_stats = &qed_get_vport_stats,
2894 	.tunn_config = &qed_tunn_configure,
2895 	.ntuple_filter_config = &qed_ntuple_arfs_filter_config,
2896 	.configure_arfs_searcher = &qed_configure_arfs_searcher,
2897 	.get_coalesce = &qed_get_coalesce,
2898 	.req_bulletin_update_mac = &qed_req_bulletin_update_mac,
2899 };
2900 
2901 const struct qed_eth_ops *qed_get_eth_ops(void)
2902 {
2903 	return &qed_eth_ops_pass;
2904 }
2905 EXPORT_SYMBOL(qed_get_eth_ops);
2906 
2907 void qed_put_eth_ops(void)
2908 {
2909 	/* TODO - reference count for module? */
2910 }
2911 EXPORT_SYMBOL(qed_put_eth_ops);
2912