1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <linux/io.h>
36 #include <linux/delay.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/mutex.h>
41 #include <linux/pci.h>
42 #include <linux/slab.h>
43 #include <linux/string.h>
44 #include <linux/vmalloc.h>
45 #include <linux/etherdevice.h>
46 #include <linux/qed/qed_chain.h>
47 #include <linux/qed/qed_if.h>
48 #include "qed.h"
49 #include "qed_cxt.h"
50 #include "qed_dcbx.h"
51 #include "qed_dev_api.h"
52 #include "qed_fcoe.h"
53 #include "qed_hsi.h"
54 #include "qed_hw.h"
55 #include "qed_init_ops.h"
56 #include "qed_int.h"
57 #include "qed_iscsi.h"
58 #include "qed_ll2.h"
59 #include "qed_mcp.h"
60 #include "qed_ooo.h"
61 #include "qed_reg_addr.h"
62 #include "qed_sp.h"
63 #include "qed_sriov.h"
64 #include "qed_vf.h"
65 #include "qed_roce.h"
66 
67 static DEFINE_SPINLOCK(qm_lock);
68 
69 #define QED_MIN_DPIS            (4)
70 #define QED_MIN_PWM_REGION      (QED_WID_SIZE * QED_MIN_DPIS)
71 
72 /* API common to all protocols */
73 enum BAR_ID {
74 	BAR_ID_0,       /* used for GRC */
75 	BAR_ID_1        /* Used for doorbells */
76 };
77 
78 static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
79 {
80 	u32 bar_reg = (bar_id == BAR_ID_0 ?
81 		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
82 	u32 val;
83 
84 	if (IS_VF(p_hwfn->cdev))
85 		return 1 << 17;
86 
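	/* The register holds the BAR size encoded as an exponent; the driver
	 * computes size = 2^(val + 15) bytes (e.g. val == 1 -> 64kB,
	 * val == 3 -> 256kB).
	 */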
87 	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
88 	if (val)
89 		return 1 << (val + 15);
90 
91 	/* An old MFW initializes the above register only conditionally */
92 	if (p_hwfn->cdev->num_hwfns > 1) {
93 		DP_INFO(p_hwfn,
94 			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
95 		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
96 	} else {
97 		DP_INFO(p_hwfn,
98 			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
99 		return 512 * 1024;
100 	}
101 }
102 
103 void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
104 {
105 	u32 i;
106 
107 	cdev->dp_level = dp_level;
108 	cdev->dp_module = dp_module;
109 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
110 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
111 
112 		p_hwfn->dp_level = dp_level;
113 		p_hwfn->dp_module = dp_module;
114 	}
115 }
116 
117 void qed_init_struct(struct qed_dev *cdev)
118 {
119 	u8 i;
120 
121 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
122 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
123 
124 		p_hwfn->cdev = cdev;
125 		p_hwfn->my_id = i;
126 		p_hwfn->b_active = false;
127 
128 		mutex_init(&p_hwfn->dmae_info.mutex);
129 	}
130 
131 	/* hwfn 0 is always active */
132 	cdev->hwfns[0].b_active = true;
133 
134 	/* set the default cache alignment to 128 */
135 	cdev->cache_shift = 7;
136 }
137 
138 static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
139 {
140 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
141 
142 	kfree(qm_info->qm_pq_params);
143 	qm_info->qm_pq_params = NULL;
144 	kfree(qm_info->qm_vport_params);
145 	qm_info->qm_vport_params = NULL;
146 	kfree(qm_info->qm_port_params);
147 	qm_info->qm_port_params = NULL;
148 	kfree(qm_info->wfq_data);
149 	qm_info->wfq_data = NULL;
150 }
151 
152 void qed_resc_free(struct qed_dev *cdev)
153 {
154 	int i;
155 
156 	if (IS_VF(cdev))
157 		return;
158 
159 	kfree(cdev->fw_data);
160 	cdev->fw_data = NULL;
161 
162 	kfree(cdev->reset_stats);
163 
164 	for_each_hwfn(cdev, i) {
165 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
166 
167 		qed_cxt_mngr_free(p_hwfn);
168 		qed_qm_info_free(p_hwfn);
169 		qed_spq_free(p_hwfn);
170 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
171 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
172 		qed_int_free(p_hwfn);
173 #ifdef CONFIG_QED_LL2
174 		qed_ll2_free(p_hwfn, p_hwfn->p_ll2_info);
175 #endif
176 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
177 			qed_fcoe_free(p_hwfn, p_hwfn->p_fcoe_info);
178 
179 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
180 			qed_iscsi_free(p_hwfn, p_hwfn->p_iscsi_info);
181 			qed_ooo_free(p_hwfn, p_hwfn->p_ooo_info);
182 		}
183 		qed_iov_free(p_hwfn);
184 		qed_dmae_info_free(p_hwfn);
185 		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
186 	}
187 }
188 
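/* Derive the QM configuration for this PF: the total PQ count is the per-TC
 * PQs plus one pure-LB PQ and one PQ per VF, with optional additions for an
 * RDMA offload PQ (and per-PF rate limiters when DCQCN is enabled) and for
 * the iSCSI pure-ACK / OOO PQs.
 */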
189 static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
190 {
191 	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
192 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
193 	struct init_qm_port_params *p_qm_port;
194 	bool init_rdma_offload_pq = false;
195 	bool init_pure_ack_pq = false;
196 	bool init_ooo_pq = false;
197 	u16 num_pqs, multi_cos_tcs = 1;
198 	u8 pf_wfq = qm_info->pf_wfq;
199 	u32 pf_rl = qm_info->pf_rl;
200 	u16 num_pf_rls = 0;
201 	u16 num_vfs = 0;
202 
203 #ifdef CONFIG_QED_SRIOV
204 	if (p_hwfn->cdev->p_iov_info)
205 		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
206 #endif
207 	memset(qm_info, 0, sizeof(*qm_info));
208 
209 	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
210 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
211 
212 	if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
213 		num_pqs++;	/* for RoCE queue */
214 		init_rdma_offload_pq = true;
215 		/* Subtract num_vfs because each VF requires a rate limiter,
216 		 * plus one for the default rate limiter.
217 		 */
218 		if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
219 			num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
220 
221 		num_pqs += num_pf_rls;
222 		qm_info->num_pf_rls = (u8) num_pf_rls;
223 	}
224 
225 	if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
226 		num_pqs += 2;	/* for iSCSI pure-ACK / OOO queue */
227 		init_pure_ack_pq = true;
228 		init_ooo_pq = true;
229 	}
230 
231 	/* Sanity check that the setup requires a legal number of resources */
232 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
233 		DP_ERR(p_hwfn,
234 		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
235 		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
236 		return -EINVAL;
237 	}
238 
239 	/* PQ order: rate-limited PQs (if any), per-TC PQs, the pure-LB PQ,
240 	 * optional offload/pure-ACK/OOO PQs, and finally the per-VF PQs. */
241 	qm_info->qm_pq_params = kcalloc(num_pqs,
242 					sizeof(struct init_qm_pq_params),
243 					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
244 	if (!qm_info->qm_pq_params)
245 		goto alloc_err;
246 
247 	qm_info->qm_vport_params = kcalloc(num_vports,
248 					   sizeof(struct init_qm_vport_params),
249 					   b_sleepable ? GFP_KERNEL
250 						       : GFP_ATOMIC);
251 	if (!qm_info->qm_vport_params)
252 		goto alloc_err;
253 
254 	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
255 					  sizeof(struct init_qm_port_params),
256 					  b_sleepable ? GFP_KERNEL
257 						      : GFP_ATOMIC);
258 	if (!qm_info->qm_port_params)
259 		goto alloc_err;
260 
261 	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
262 				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
263 	if (!qm_info->wfq_data)
264 		goto alloc_err;
265 
266 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
267 
268 	/* First init rate limited queues */
269 	for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
270 		qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
271 		qm_info->qm_pq_params[curr_queue].tc_id =
272 		    p_hwfn->hw_info.non_offload_tc;
273 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
274 		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
275 	}
276 
277 	/* Then init per-TC PQs */
278 	for (i = 0; i < multi_cos_tcs; i++) {
279 		struct init_qm_pq_params *params =
280 		    &qm_info->qm_pq_params[curr_queue++];
281 
282 		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
283 		    p_hwfn->hw_info.personality == QED_PCI_ETH) {
284 			params->vport_id = vport_id;
285 			params->tc_id = p_hwfn->hw_info.non_offload_tc;
286 			params->wrr_group = 1;
287 		} else {
288 			params->vport_id = vport_id;
289 			params->tc_id = p_hwfn->hw_info.offload_tc;
290 			params->wrr_group = 1;
291 		}
292 	}
293 
294 	/* Then init pure-LB PQ */
295 	qm_info->pure_lb_pq = curr_queue;
296 	qm_info->qm_pq_params[curr_queue].vport_id =
297 	    (u8) RESC_START(p_hwfn, QED_VPORT);
298 	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
299 	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
300 	curr_queue++;
301 
302 	qm_info->offload_pq = 0;
303 	if (init_rdma_offload_pq) {
304 		qm_info->offload_pq = curr_queue;
305 		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
306 		qm_info->qm_pq_params[curr_queue].tc_id =
307 		    p_hwfn->hw_info.offload_tc;
308 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
309 		curr_queue++;
310 	}
311 
312 	if (init_pure_ack_pq) {
313 		qm_info->pure_ack_pq = curr_queue;
314 		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
315 		qm_info->qm_pq_params[curr_queue].tc_id =
316 		    p_hwfn->hw_info.offload_tc;
317 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
318 		curr_queue++;
319 	}
320 
321 	if (init_ooo_pq) {
322 		qm_info->ooo_pq = curr_queue;
323 		qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
324 		qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
325 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
326 		curr_queue++;
327 	}
328 
329 	/* Then init per-VF PQs */
330 	vf_offset = curr_queue;
331 	for (i = 0; i < num_vfs; i++) {
332 		/* First vport is used by the PF */
333 		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
334 		qm_info->qm_pq_params[curr_queue].tc_id =
335 		    p_hwfn->hw_info.non_offload_tc;
336 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
337 		qm_info->qm_pq_params[curr_queue].rl_valid = 1;
338 		curr_queue++;
339 	}
340 
341 	qm_info->vf_queues_offset = vf_offset;
342 	qm_info->num_pqs = num_pqs;
343 	qm_info->num_vports = num_vports;
344 
345 	/* Initialize qm port parameters */
346 	num_ports = p_hwfn->cdev->num_ports_in_engines;
347 	for (i = 0; i < num_ports; i++) {
348 		p_qm_port = &qm_info->qm_port_params[i];
349 		p_qm_port->active = 1;
350 		if (num_ports == 4)
351 			p_qm_port->active_phys_tcs = 0x7;
352 		else
353 			p_qm_port->active_phys_tcs = 0x9f;
354 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
355 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
356 	}
357 
358 	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
359 
360 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
361 
362 	qm_info->num_vf_pqs = num_vfs;
363 	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
364 
365 	for (i = 0; i < qm_info->num_vports; i++)
366 		qm_info->qm_vport_params[i].vport_wfq = 1;
367 
368 	qm_info->vport_rl_en = 1;
369 	qm_info->vport_wfq_en = 1;
370 	qm_info->pf_rl = pf_rl;
371 	qm_info->pf_wfq = pf_wfq;
372 
373 	return 0;
374 
375 alloc_err:
376 	qed_qm_info_free(p_hwfn);
377 	return -ENOMEM;
378 }
379 
380 /* This function reconfigures the QM pf on the fly.
381  * For this purpose we:
382  * 1. reconfigure the QM database
383  * 2. set new values in the runtime array
384  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
385  * 4. activate init tool in QM_PF stage
386  * 5. send an sdm_qm_cmd through rbc interface to release the QM
387  */
388 int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
389 {
390 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
391 	bool b_rc;
392 	int rc;
393 
394 	/* qm_info is allocated in qed_init_qm_info() which is already called
395 	 * from qed_resc_alloc() or previous call of qed_qm_reconf().
396 	 * The allocated size may change each init, so we free it before next
397 	 * allocation.
398 	 */
399 	qed_qm_info_free(p_hwfn);
400 
401 	/* initialize qed's qm data structure */
402 	rc = qed_init_qm_info(p_hwfn, false);
403 	if (rc)
404 		return rc;
405 
406 	/* stop PF's qm queues */
407 	spin_lock_bh(&qm_lock);
408 	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
409 				    qm_info->start_pq, qm_info->num_pqs);
410 	spin_unlock_bh(&qm_lock);
411 	if (!b_rc)
412 		return -EINVAL;
413 
414 	/* clear the QM_PF runtime phase leftovers from previous init */
415 	qed_init_clear_rt_data(p_hwfn);
416 
417 	/* prepare QM portion of runtime array */
418 	qed_qm_init_pf(p_hwfn);
419 
420 	/* activate init tool on runtime array */
421 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
422 			  p_hwfn->hw_info.hw_mode);
423 	if (rc)
424 		return rc;
425 
426 	/* start PF's qm queues */
427 	spin_lock_bh(&qm_lock);
428 	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
429 				    qm_info->start_pq, qm_info->num_pqs);
430 	spin_unlock_bh(&qm_lock);
431 	if (!b_rc)
432 		return -EINVAL;
433 
434 	return 0;
435 }
436 
437 int qed_resc_alloc(struct qed_dev *cdev)
438 {
439 	struct qed_iscsi_info *p_iscsi_info;
440 	struct qed_fcoe_info *p_fcoe_info;
441 	struct qed_ooo_info *p_ooo_info;
442 #ifdef CONFIG_QED_LL2
443 	struct qed_ll2_info *p_ll2_info;
444 #endif
445 	struct qed_consq *p_consq;
446 	struct qed_eq *p_eq;
447 	int i, rc = 0;
448 
449 	if (IS_VF(cdev))
450 		return rc;
451 
452 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
453 	if (!cdev->fw_data)
454 		return -ENOMEM;
455 
456 	for_each_hwfn(cdev, i) {
457 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
458 		u32 n_eqes, num_cons;
459 
460 		/* First allocate the context manager structure */
461 		rc = qed_cxt_mngr_alloc(p_hwfn);
462 		if (rc)
463 			goto alloc_err;
464 
465 		/* Set the HW cid/tid numbers (in the context manager)
466 		 * Must be done prior to any further computations.
467 		 */
468 		rc = qed_cxt_set_pf_params(p_hwfn);
469 		if (rc)
470 			goto alloc_err;
471 
472 		/* Prepare and process QM requirements */
473 		rc = qed_init_qm_info(p_hwfn, true);
474 		if (rc)
475 			goto alloc_err;
476 
477 		/* Compute the ILT client partition */
478 		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
479 		if (rc)
480 			goto alloc_err;
481 
482 		/* CID map / ILT shadow table / T2
483 		 * The table sizes are determined by the computations above
484 		 */
485 		rc = qed_cxt_tables_alloc(p_hwfn);
486 		if (rc)
487 			goto alloc_err;
488 
489 		/* SPQ, must follow ILT because it initializes the SPQ context */
490 		rc = qed_spq_alloc(p_hwfn);
491 		if (rc)
492 			goto alloc_err;
493 
494 		/* SP status block allocation */
495 		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
496 							 RESERVED_PTT_DPC);
497 
498 		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
499 		if (rc)
500 			goto alloc_err;
501 
502 		rc = qed_iov_alloc(p_hwfn);
503 		if (rc)
504 			goto alloc_err;
505 
506 		/* EQ */
507 		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
508 		if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
509 			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
510 							       PROTOCOLID_ROCE,
511 							       NULL) * 2;
512 			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
513 		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
514 			num_cons =
515 			    qed_cxt_get_proto_cid_count(p_hwfn,
516 							PROTOCOLID_ISCSI,
517 							NULL);
518 			n_eqes += 2 * num_cons;
519 		}
520 
521 		if (n_eqes > 0xFFFF) {
522 			DP_ERR(p_hwfn,
523 			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
524 			       n_eqes, 0xFFFF);
525 			rc = -EINVAL;
526 			goto alloc_err;
527 		}
528 
529 		p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
530 		if (!p_eq)
531 			goto alloc_no_mem;
532 		p_hwfn->p_eq = p_eq;
533 
534 		p_consq = qed_consq_alloc(p_hwfn);
535 		if (!p_consq)
536 			goto alloc_no_mem;
537 		p_hwfn->p_consq = p_consq;
538 
539 #ifdef CONFIG_QED_LL2
540 		if (p_hwfn->using_ll2) {
541 			p_ll2_info = qed_ll2_alloc(p_hwfn);
542 			if (!p_ll2_info)
543 				goto alloc_no_mem;
544 			p_hwfn->p_ll2_info = p_ll2_info;
545 		}
546 #endif
547 
548 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
549 			p_fcoe_info = qed_fcoe_alloc(p_hwfn);
550 			if (!p_fcoe_info)
551 				goto alloc_no_mem;
552 			p_hwfn->p_fcoe_info = p_fcoe_info;
553 		}
554 
555 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
556 			p_iscsi_info = qed_iscsi_alloc(p_hwfn);
557 			if (!p_iscsi_info)
558 				goto alloc_no_mem;
559 			p_hwfn->p_iscsi_info = p_iscsi_info;
560 			p_ooo_info = qed_ooo_alloc(p_hwfn);
561 			if (!p_ooo_info)
562 				goto alloc_no_mem;
563 			p_hwfn->p_ooo_info = p_ooo_info;
564 		}
565 
566 		/* DMA info initialization */
567 		rc = qed_dmae_info_alloc(p_hwfn);
568 		if (rc)
569 			goto alloc_err;
570 
571 		/* DCBX initialization */
572 		rc = qed_dcbx_info_alloc(p_hwfn);
573 		if (rc)
574 			goto alloc_err;
575 	}
576 
577 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
578 	if (!cdev->reset_stats)
579 		goto alloc_no_mem;
580 
581 	return 0;
582 
583 alloc_no_mem:
584 	rc = -ENOMEM;
585 alloc_err:
586 	qed_resc_free(cdev);
587 	return rc;
588 }
589 
590 void qed_resc_setup(struct qed_dev *cdev)
591 {
592 	int i;
593 
594 	if (IS_VF(cdev))
595 		return;
596 
597 	for_each_hwfn(cdev, i) {
598 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
599 
600 		qed_cxt_mngr_setup(p_hwfn);
601 		qed_spq_setup(p_hwfn);
602 		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
603 		qed_consq_setup(p_hwfn, p_hwfn->p_consq);
604 
605 		/* Read shadow of current MFW mailbox */
606 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
607 		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
608 		       p_hwfn->mcp_info->mfw_mb_cur,
609 		       p_hwfn->mcp_info->mfw_mb_length);
610 
611 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
612 
613 		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
614 #ifdef CONFIG_QED_LL2
615 		if (p_hwfn->using_ll2)
616 			qed_ll2_setup(p_hwfn, p_hwfn->p_ll2_info);
617 #endif
618 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
619 			qed_fcoe_setup(p_hwfn, p_hwfn->p_fcoe_info);
620 
621 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
622 			qed_iscsi_setup(p_hwfn, p_hwfn->p_iscsi_info);
623 			qed_ooo_setup(p_hwfn, p_hwfn->p_ooo_info);
624 		}
625 	}
626 }
627 
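/* The FW acks the final cleanup by writing a non-zero value to the USDM RAM
 * offset polled below; with these values the wait is bounded by roughly
 * FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME = 100 * 10ms = 1 second.
 */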
628 #define FINAL_CLEANUP_POLL_CNT          (100)
629 #define FINAL_CLEANUP_POLL_TIME         (10)
630 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
631 		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
632 {
633 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
634 	int rc = -EBUSY;
635 
636 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
637 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
638 
639 	if (is_vf)
640 		id += 0x10;
641 
642 	command |= X_FINAL_CLEANUP_AGG_INT <<
643 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
644 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
645 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
646 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
647 
648 	/* Make sure notification is not set before initiating final cleanup */
649 	if (REG_RD(p_hwfn, addr)) {
650 		DP_NOTICE(p_hwfn,
651 			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
652 		REG_WR(p_hwfn, addr, 0);
653 	}
654 
655 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
656 		   "Sending final cleanup for PFVF[%d] [Command %08x\n]",
657 		   id, command);
658 
659 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
660 
661 	/* Poll until completion */
662 	while (!REG_RD(p_hwfn, addr) && count--)
663 		msleep(FINAL_CLEANUP_POLL_TIME);
664 
665 	if (REG_RD(p_hwfn, addr))
666 		rc = 0;
667 	else
668 		DP_NOTICE(p_hwfn,
669 			  "Failed to receive FW final cleanup notification\n");
670 
671 	/* Cleanup afterwards */
672 	REG_WR(p_hwfn, addr, 0);
673 
674 	return rc;
675 }
676 
677 static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
678 {
679 	int hw_mode = 0;
680 
681 	hw_mode = (1 << MODE_BB_B0);
682 
683 	switch (p_hwfn->cdev->num_ports_in_engines) {
684 	case 1:
685 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
686 		break;
687 	case 2:
688 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
689 		break;
690 	case 4:
691 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
692 		break;
693 	default:
694 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
695 			  p_hwfn->cdev->num_ports_in_engines);
696 		return;
697 	}
698 
699 	switch (p_hwfn->cdev->mf_mode) {
700 	case QED_MF_DEFAULT:
701 	case QED_MF_NPAR:
702 		hw_mode |= 1 << MODE_MF_SI;
703 		break;
704 	case QED_MF_OVLAN:
705 		hw_mode |= 1 << MODE_MF_SD;
706 		break;
707 	default:
708 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
709 		hw_mode |= 1 << MODE_MF_SI;
710 	}
711 
712 	hw_mode |= 1 << MODE_ASIC;
713 
714 	if (p_hwfn->cdev->num_hwfns > 1)
715 		hw_mode |= 1 << MODE_100G;
716 
717 	p_hwfn->hw_info.hw_mode = hw_mode;
718 
719 	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
720 		   "Configuring function for hw_mode: 0x%08x\n",
721 		   p_hwfn->hw_info.hw_mode);
722 }
723 
724 /* Init run time data for all PFs on an engine. */
725 static void qed_init_cau_rt_data(struct qed_dev *cdev)
726 {
727 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
728 	int i, sb_id;
729 
730 	for_each_hwfn(cdev, i) {
731 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
732 		struct qed_igu_info *p_igu_info;
733 		struct qed_igu_block *p_block;
734 		struct cau_sb_entry sb_entry;
735 
736 		p_igu_info = p_hwfn->hw_info.p_igu_info;
737 
738 		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
739 		     sb_id++) {
740 			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
741 			if (!p_block->is_pf)
742 				continue;
743 
744 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
745 					      p_block->function_id, 0, 0);
746 			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
747 		}
748 	}
749 }
750 
751 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
752 			      struct qed_ptt *p_ptt, int hw_mode)
753 {
754 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
755 	struct qed_qm_common_rt_init_params params;
756 	struct qed_dev *cdev = p_hwfn->cdev;
757 	u16 num_pfs, pf_id;
758 	u32 concrete_fid;
759 	int rc = 0;
760 	u8 vf_id;
761 
762 	qed_init_cau_rt_data(cdev);
763 
764 	/* Program GTT windows */
765 	qed_gtt_init(p_hwfn);
766 
767 	if (p_hwfn->mcp_info) {
768 		if (p_hwfn->mcp_info->func_info.bandwidth_max)
769 			qm_info->pf_rl_en = 1;
770 		if (p_hwfn->mcp_info->func_info.bandwidth_min)
771 			qm_info->pf_wfq_en = 1;
772 	}
773 
774 	memset(&params, 0, sizeof(params));
775 	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
776 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
777 	params.pf_rl_en = qm_info->pf_rl_en;
778 	params.pf_wfq_en = qm_info->pf_wfq_en;
779 	params.vport_rl_en = qm_info->vport_rl_en;
780 	params.vport_wfq_en = qm_info->vport_wfq_en;
781 	params.port_params = qm_info->qm_port_params;
782 
783 	qed_qm_common_rt_init(p_hwfn, &params);
784 
785 	qed_cxt_hw_init_common(p_hwfn);
786 
787 	/* Close gate from NIG to BRB/Storm; By default they are open, but
788 	 * we close them to prevent NIG from passing data to reset blocks.
789 	 * Should have been done in the ENGINE phase, but init-tool lacks
790 	 * proper port-pretend capabilities.
791 	 */
792 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
793 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
794 	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
795 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
796 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
797 	qed_port_unpretend(p_hwfn, p_ptt);
798 
799 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
800 	if (rc)
801 		return rc;
802 
803 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
804 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
805 
806 	if (QED_IS_BB(p_hwfn->cdev)) {
807 		num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
808 		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
809 			qed_fid_pretend(p_hwfn, p_ptt, pf_id);
810 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
811 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
812 		}
813 		/* pretend to original PF */
814 		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
815 	}
816 
817 	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
818 		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
819 		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
820 		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
821 		qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
822 		qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
823 		qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
824 	}
825 	/* pretend to original PF */
826 	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
827 
828 	return rc;
829 }
830 
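/* Derive the DPI (doorbell page) geometry for the PWM region: the DPI page
 * size is the larger of QED_WID_SIZE * n_cpus and PAGE_SIZE, rounded up to a
 * power of two; dpi_bit_shift expresses it as log2 relative to 4kB, and
 * dpi_count is how many such pages fit in the PWM region.
 */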
831 static int
832 qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
833 		     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
834 {
835 	u32 dpi_page_size_1, dpi_page_size_2, dpi_page_size;
836 	u32 dpi_bit_shift, dpi_count;
837 	u32 min_dpis;
838 
839 	/* Calculate DPI size */
840 	dpi_page_size_1 = QED_WID_SIZE * n_cpus;
841 	dpi_page_size_2 = max_t(u32, QED_WID_SIZE, PAGE_SIZE);
842 	dpi_page_size = max_t(u32, dpi_page_size_1, dpi_page_size_2);
843 	dpi_page_size = roundup_pow_of_two(dpi_page_size);
844 	dpi_bit_shift = ilog2(dpi_page_size / 4096);
845 
846 	dpi_count = pwm_region_size / dpi_page_size;
847 
848 	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
849 	min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
850 
851 	p_hwfn->dpi_size = dpi_page_size;
852 	p_hwfn->dpi_count = dpi_count;
853 
854 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
855 
856 	if (dpi_count < min_dpis)
857 		return -EINVAL;
858 
859 	return 0;
860 }
861 
862 enum QED_ROCE_EDPM_MODE {
863 	QED_ROCE_EDPM_MODE_ENABLE = 0,
864 	QED_ROCE_EDPM_MODE_FORCE_ON = 1,
865 	QED_ROCE_EDPM_MODE_DISABLE = 2,
866 };
867 
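/* Split the doorbell BAR into a 'normal' region - QED_PF_DEMS_SIZE bytes per
 * non-PWM connection (CORE + ETH CIDs), rounded up to 4kB - and a PWM region
 * holding the DPIs, which must be at least QED_MIN_PWM_REGION bytes.
 */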
868 static int
869 qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
870 {
871 	u32 pwm_regsize, norm_regsize;
872 	u32 non_pwm_conn, min_addr_reg1;
873 	u32 db_bar_size, n_cpus;
874 	u32 roce_edpm_mode;
875 	u32 pf_dems_shift;
876 	int rc = 0;
877 	u8 cond;
878 
879 	db_bar_size = qed_hw_bar_size(p_hwfn, BAR_ID_1);
880 	if (p_hwfn->cdev->num_hwfns > 1)
881 		db_bar_size /= 2;
882 
883 	/* Calculate doorbell regions */
884 	non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
885 		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
886 						   NULL) +
887 		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
888 						   NULL);
889 	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, 4096);
890 	min_addr_reg1 = norm_regsize / 4096;
891 	pwm_regsize = db_bar_size - norm_regsize;
892 
893 	/* Check that the normal and PWM sizes are valid */
894 	if (db_bar_size < norm_regsize) {
895 		DP_ERR(p_hwfn->cdev,
896 		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
897 		       db_bar_size, norm_regsize);
898 		return -EINVAL;
899 	}
900 
901 	if (pwm_regsize < QED_MIN_PWM_REGION) {
902 		DP_ERR(p_hwfn->cdev,
903 		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
904 		       pwm_regsize,
905 		       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
906 		return -EINVAL;
907 	}
908 
909 	/* Calculate number of DPIs */
910 	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
911 	if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
912 	    (roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON)) {
913 		/* Either EDPM is mandatory, or we are attempting to allocate a
914 		 * WID per CPU.
915 		 */
916 		n_cpus = num_present_cpus();
917 		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
918 	}
919 
920 	cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
921 	       (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
922 	if (cond || p_hwfn->dcbx_no_edpm) {
923 		/* Either EDPM is disabled from user configuration, or it is
924 		 * disabled via DCBx, or it is not mandatory and we failed to
925 		 * allocate a WID per CPU.
926 		 */
927 		n_cpus = 1;
928 		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
929 
930 		if (cond)
931 			qed_rdma_dpm_bar(p_hwfn, p_ptt);
932 	}
933 
934 	DP_INFO(p_hwfn,
935 		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
936 		norm_regsize,
937 		pwm_regsize,
938 		p_hwfn->dpi_size,
939 		p_hwfn->dpi_count,
940 		((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
941 		"disabled" : "enabled");
942 
943 	if (rc) {
944 		DP_ERR(p_hwfn,
945 		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
946 		       p_hwfn->dpi_count,
947 		       p_hwfn->pf_params.rdma_pf_params.min_dpis);
948 		return -EINVAL;
949 	}
950 
951 	p_hwfn->dpi_start_offset = norm_regsize;
952 
953 	/* DEMS size is configured as log2 of DWORDs, hence the division by 4 */
954 	pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
955 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
956 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
957 
958 	return 0;
959 }
960 
961 static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
962 			    struct qed_ptt *p_ptt, int hw_mode)
963 {
964 	return qed_init_run(p_hwfn, p_ptt, PHASE_PORT,
965 			    p_hwfn->port_id, hw_mode);
966 }
967 
968 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
969 			  struct qed_ptt *p_ptt,
970 			  struct qed_tunn_start_params *p_tunn,
971 			  int hw_mode,
972 			  bool b_hw_start,
973 			  enum qed_int_mode int_mode,
974 			  bool allow_npar_tx_switch)
975 {
976 	u8 rel_pf_id = p_hwfn->rel_pf_id;
977 	int rc = 0;
978 
979 	if (p_hwfn->mcp_info) {
980 		struct qed_mcp_function_info *p_info;
981 
982 		p_info = &p_hwfn->mcp_info->func_info;
983 		if (p_info->bandwidth_min)
984 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
985 
986 		/* Update the rate limit once we actually have a link */
987 		p_hwfn->qm_info.pf_rl = 100000;
988 	}
989 
990 	qed_cxt_hw_init_pf(p_hwfn);
991 
992 	qed_int_igu_init_rt(p_hwfn);
993 
994 	/* Set VLAN in NIG if needed */
995 	if (hw_mode & BIT(MODE_MF_SD)) {
996 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
997 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
998 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
999 			     p_hwfn->hw_info.ovlan);
1000 	}
1001 
1002 	/* Enable classification by MAC if needed */
1003 	if (hw_mode & BIT(MODE_MF_SI)) {
1004 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
1005 			   "Configuring TAGMAC_CLS_TYPE\n");
1006 		STORE_RT_REG(p_hwfn,
1007 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
1008 	}
1009 
1010 	/* Protocol configuration */
1011 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
1012 		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
1013 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
1014 		     (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
1015 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
1016 
1017 	/* Clean up the chip from a previous driver if any remains exist */
1018 	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
1019 	if (rc)
1020 		return rc;
1021 
1022 	/* PF Init sequence */
1023 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
1024 	if (rc)
1025 		return rc;
1026 
1027 	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
1028 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
1029 	if (rc)
1030 		return rc;
1031 
1032 	/* Pure runtime initializations - directly to the HW  */
1033 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
1034 
1035 	rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
1036 	if (rc)
1037 		return rc;
1038 
1039 	if (b_hw_start) {
1040 		/* enable interrupts */
1041 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
1042 
1043 		/* send function start command */
1044 		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
1045 				     allow_npar_tx_switch);
1046 		if (rc) {
1047 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
1048 			return rc;
1049 		}
1050 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
1051 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
1052 			qed_wr(p_hwfn, p_ptt,
1053 			       PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
1054 			       0x100);
1055 		}
1056 	}
1057 	return rc;
1058 }
1059 
1060 static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
1061 			       struct qed_ptt *p_ptt,
1062 			       u8 enable)
1063 {
1064 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
1065 
1066 	/* Change PF in PXP */
1067 	qed_wr(p_hwfn, p_ptt,
1068 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
1069 
1070 	/* Wait until the value is set - poll every 50us for up to 1 second */
1071 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
1072 		val = qed_rd(p_hwfn, p_ptt,
1073 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1074 		if (val == set_val)
1075 			break;
1076 
1077 		usleep_range(50, 60);
1078 	}
1079 
1080 	if (val != set_val) {
1081 		DP_NOTICE(p_hwfn,
1082 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
1083 		return -EAGAIN;
1084 	}
1085 
1086 	return 0;
1087 }
1088 
1089 static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
1090 				struct qed_ptt *p_main_ptt)
1091 {
1092 	/* Read shadow of current MFW mailbox */
1093 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
1094 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
1095 	       p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
1096 }
1097 
1098 int qed_hw_init(struct qed_dev *cdev,
1099 		struct qed_tunn_start_params *p_tunn,
1100 		bool b_hw_start,
1101 		enum qed_int_mode int_mode,
1102 		bool allow_npar_tx_switch,
1103 		const u8 *bin_fw_data)
1104 {
1105 	u32 load_code, param, drv_mb_param;
1106 	bool b_default_mtu = true;
1107 	struct qed_hwfn *p_hwfn;
1108 	int rc = 0, mfw_rc, i;
1109 
1110 	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
1111 		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
1112 		return -EINVAL;
1113 	}
1114 
1115 	if (IS_PF(cdev)) {
1116 		rc = qed_init_fw_data(cdev, bin_fw_data);
1117 		if (rc)
1118 			return rc;
1119 	}
1120 
1121 	for_each_hwfn(cdev, i) {
1122 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1123 
1124 		/* If management didn't provide a default, set one of our own */
1125 		if (!p_hwfn->hw_info.mtu) {
1126 			p_hwfn->hw_info.mtu = 1500;
1127 			b_default_mtu = false;
1128 		}
1129 
1130 		if (IS_VF(cdev)) {
1131 			p_hwfn->b_int_enabled = 1;
1132 			continue;
1133 		}
1134 
1135 		/* Enable DMAE in PXP */
1136 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
1137 
1138 		qed_calc_hw_mode(p_hwfn);
1139 
1140 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
1141 		if (rc) {
1142 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
1143 			return rc;
1144 		}
1145 
1146 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
1147 
1148 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
1149 			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
1150 			   rc, load_code);
1151 
1152 		p_hwfn->first_on_engine = (load_code ==
1153 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
1154 
1155 		switch (load_code) {
1156 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
1157 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
1158 						p_hwfn->hw_info.hw_mode);
1159 			if (rc)
1160 				break;
1161 		/* Fall through */
1162 		case FW_MSG_CODE_DRV_LOAD_PORT:
1163 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
1164 					      p_hwfn->hw_info.hw_mode);
1165 			if (rc)
1166 				break;
1167 
1168 		/* Fall through */
1169 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1170 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
1171 					    p_tunn, p_hwfn->hw_info.hw_mode,
1172 					    b_hw_start, int_mode,
1173 					    allow_npar_tx_switch);
1174 			break;
1175 		default:
1176 			rc = -EINVAL;
1177 			break;
1178 		}
1179 
1180 		if (rc)
1181 			DP_NOTICE(p_hwfn,
1182 				  "init phase failed for loadcode 0x%x (rc %d)\n",
1183 				   load_code, rc);
1184 
1185 		/* ACK mfw regardless of success or failure of initialization */
1186 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1187 				     DRV_MSG_CODE_LOAD_DONE,
1188 				     0, &load_code, &param);
1189 		if (rc)
1190 			return rc;
1191 		if (mfw_rc) {
1192 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
1193 			return mfw_rc;
1194 		}
1195 
1196 		/* send DCBX attention request command */
1197 		DP_VERBOSE(p_hwfn,
1198 			   QED_MSG_DCB,
1199 			   "sending phony dcbx set command to trigger DCBx attention handling\n");
1200 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1201 				     DRV_MSG_CODE_SET_DCBX,
1202 				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
1203 				     &load_code, &param);
1204 		if (mfw_rc) {
1205 			DP_NOTICE(p_hwfn,
1206 				  "Failed to send DCBX attention request\n");
1207 			return mfw_rc;
1208 		}
1209 
1210 		p_hwfn->hw_init_done = true;
1211 	}
1212 
1213 	if (IS_PF(cdev)) {
1214 		p_hwfn = QED_LEADING_HWFN(cdev);
1215 		drv_mb_param = (FW_MAJOR_VERSION << 24) |
1216 			       (FW_MINOR_VERSION << 16) |
1217 			       (FW_REVISION_VERSION << 8) |
1218 			       (FW_ENGINEERING_VERSION);
1219 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1220 				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
1221 				 drv_mb_param, &load_code, &param);
1222 		if (rc)
1223 			DP_INFO(p_hwfn, "Failed to update firmware version\n");
1224 
1225 		if (!b_default_mtu) {
1226 			rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
1227 						   p_hwfn->hw_info.mtu);
1228 			if (rc)
1229 				DP_INFO(p_hwfn,
1230 					"Failed to update default mtu\n");
1231 		}
1232 
1233 		rc = qed_mcp_ov_update_driver_state(p_hwfn,
1234 						    p_hwfn->p_main_ptt,
1235 						  QED_OV_DRIVER_STATE_DISABLED);
1236 		if (rc)
1237 			DP_INFO(p_hwfn, "Failed to update driver state\n");
1238 
1239 		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
1240 					       QED_OV_ESWITCH_VEB);
1241 		if (rc)
1242 			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
1243 	}
1244 
1245 	return 0;
1246 }
1247 
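/* After closing the timer block, wait for its linear scans to complete - up
 * to QED_HW_STOP_RETRY_LIMIT polls, roughly 1-2ms apart.
 */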
1248 #define QED_HW_STOP_RETRY_LIMIT (10)
1249 static void qed_hw_timers_stop(struct qed_dev *cdev,
1250 			       struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1251 {
1252 	int i;
1253 
1254 	/* close timers */
1255 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
1256 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
1257 
1258 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
1259 		if ((!qed_rd(p_hwfn, p_ptt,
1260 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
1261 		    (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
1262 			break;
1263 
1264 		/* Depending on the number of connections/tasks, a 1ms sleep
1265 		 * may be required between polls.
1266 		 */
1267 		usleep_range(1000, 2000);
1268 	}
1269 
1270 	if (i < QED_HW_STOP_RETRY_LIMIT)
1271 		return;
1272 
1273 	DP_NOTICE(p_hwfn,
1274 		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
1275 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
1276 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
1277 }
1278 
1279 void qed_hw_timers_stop_all(struct qed_dev *cdev)
1280 {
1281 	int j;
1282 
1283 	for_each_hwfn(cdev, j) {
1284 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
1285 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
1286 
1287 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
1288 	}
1289 }
1290 
1291 int qed_hw_stop(struct qed_dev *cdev)
1292 {
1293 	int rc = 0, t_rc;
1294 	int j;
1295 
1296 	for_each_hwfn(cdev, j) {
1297 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
1298 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
1299 
1300 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
1301 
1302 		if (IS_VF(cdev)) {
1303 			qed_vf_pf_int_cleanup(p_hwfn);
1304 			continue;
1305 		}
1306 
1307 		/* mark the hw as uninitialized... */
1308 		p_hwfn->hw_init_done = false;
1309 
1310 		rc = qed_sp_pf_stop(p_hwfn);
1311 		if (rc)
1312 			DP_NOTICE(p_hwfn,
1313 				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
1314 
1315 		qed_wr(p_hwfn, p_ptt,
1316 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1317 
1318 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1319 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1320 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
1321 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
1322 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1323 
1324 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
1325 
1326 		/* Disable Attention Generation */
1327 		qed_int_igu_disable_int(p_hwfn, p_ptt);
1328 
1329 		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
1330 		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
1331 
1332 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
1333 
1334 		/* Need to wait 1ms to guarantee SBs are cleared */
1335 		usleep_range(1000, 2000);
1336 	}
1337 
1338 	if (IS_PF(cdev)) {
1339 		/* Disable DMAE in PXP - in CMT, this should only be done for
1340 		 * the first hw-function, and only after all transactions have
1341 		 * stopped for all active hw-functions.
1342 		 */
1343 		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
1344 					   cdev->hwfns[0].p_main_ptt, false);
1345 		if (t_rc != 0)
1346 			rc = t_rc;
1347 	}
1348 
1349 	return rc;
1350 }
1351 
1352 void qed_hw_stop_fastpath(struct qed_dev *cdev)
1353 {
1354 	int j;
1355 
1356 	for_each_hwfn(cdev, j) {
1357 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
1358 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
1359 
1360 		if (IS_VF(cdev)) {
1361 			qed_vf_pf_int_cleanup(p_hwfn);
1362 			continue;
1363 		}
1364 
1365 		DP_VERBOSE(p_hwfn,
1366 			   NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
1367 
1368 		qed_wr(p_hwfn, p_ptt,
1369 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1370 
1371 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1372 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1373 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
1374 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
1375 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1376 
1377 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
1378 
1379 		/* Need to wait 1ms to guarantee SBs are cleared */
1380 		usleep_range(1000, 2000);
1381 	}
1382 }
1383 
1384 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
1385 {
1386 	if (IS_VF(p_hwfn->cdev))
1387 		return;
1388 
1389 	/* Re-open incoming traffic */
1390 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1391 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
1392 }
1393 
1394 static int qed_reg_assert(struct qed_hwfn *p_hwfn,
1395 			  struct qed_ptt *p_ptt, u32 reg, bool expected)
1396 {
1397 	u32 assert_val = qed_rd(p_hwfn, p_ptt, reg);
1398 
1399 	if (assert_val != expected) {
1400 		DP_NOTICE(p_hwfn, "Value at address 0x%08x != 0x%08x\n",
1401 			  reg, expected);
1402 		return -EINVAL;
1403 	}
1404 
1405 	return 0;
1406 }
1407 
1408 int qed_hw_reset(struct qed_dev *cdev)
1409 {
1410 	int rc = 0;
1411 	u32 unload_resp, unload_param;
1412 	u32 wol_param;
1413 	int i;
1414 
1415 	switch (cdev->wol_config) {
1416 	case QED_OV_WOL_DISABLED:
1417 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1418 		break;
1419 	case QED_OV_WOL_ENABLED:
1420 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1421 		break;
1422 	default:
1423 		DP_NOTICE(cdev,
1424 			  "Unknown WoL configuration %02x\n", cdev->wol_config);
1425 		/* Fallthrough */
1426 	case QED_OV_WOL_DEFAULT:
1427 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1428 	}
1429 
1430 	for_each_hwfn(cdev, i) {
1431 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1432 
1433 		if (IS_VF(cdev)) {
1434 			rc = qed_vf_pf_reset(p_hwfn);
1435 			if (rc)
1436 				return rc;
1437 			continue;
1438 		}
1439 
1440 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
1441 
1442 		/* Check for incorrect states */
1443 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1444 			       QM_REG_USG_CNT_PF_TX, 0);
1445 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1446 			       QM_REG_USG_CNT_PF_OTHER, 0);
1447 
1448 		/* Disable PF in HW blocks */
1449 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
1450 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
1451 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1452 		       TCFC_REG_STRONG_ENABLE_PF, 0);
1453 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1454 		       CCFC_REG_STRONG_ENABLE_PF, 0);
1455 
1456 		/* Send unload command to MCP */
1457 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1458 				 DRV_MSG_CODE_UNLOAD_REQ, wol_param,
1459 				 &unload_resp, &unload_param);
1460 		if (rc) {
1461 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
1462 			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
1463 		}
1464 
1465 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1466 				 DRV_MSG_CODE_UNLOAD_DONE,
1467 				 0, &unload_resp, &unload_param);
1468 		if (rc) {
1469 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
1470 			return rc;
1471 		}
1472 	}
1473 
1474 	return rc;
1475 }
1476 
1477 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
1478 static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
1479 {
1480 	qed_ptt_pool_free(p_hwfn);
1481 	kfree(p_hwfn->hw_info.p_igu_info);
1482 }
1483 
1484 /* Setup bar access */
1485 static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
1486 {
1487 	/* clear indirect access */
1488 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
1489 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
1490 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
1491 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
1492 
1493 	/* Clean previous errors if any exist */
1494 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1495 	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
1496 
1497 	/* enable internal target-read */
1498 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1499 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1500 }
1501 
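/* Derive the function's identifiers from the PXP ME registers: the opaque
 * FID is read directly, while the concrete FID encodes the absolute PF-id,
 * the relative PF-id and the port.
 */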
1502 static void get_function_id(struct qed_hwfn *p_hwfn)
1503 {
1504 	/* ME Register */
1505 	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
1506 						  PXP_PF_ME_OPAQUE_ADDR);
1507 
1508 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1509 
1510 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1511 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1512 				      PXP_CONCRETE_FID_PFID);
1513 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1514 				    PXP_CONCRETE_FID_PORT);
1515 
1516 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1517 		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
1518 		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
1519 }
1520 
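/* Split the acquired resources into 'features': status blocks and L2 queues
 * are shared between plain L2 and RDMA CNQs (when RDMA is applicable), and
 * the VF L2 queues get whatever L2 queues remain beyond the PF's share.
 */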
1521 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
1522 {
1523 	u32 *feat_num = p_hwfn->hw_info.feat_num;
1524 	struct qed_sb_cnt_info sb_cnt_info;
1525 	int num_features = 1;
1526 
1527 	if (IS_ENABLED(CONFIG_QED_RDMA) &&
1528 	    p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
1529 		/* Each RoCE CNQ requires one status block and one CNQ. We
1530 		 * divide the status blocks equally between L2 and RoCE, with
1531 		 * consideration as to how many L2 queues / CNQs we have.
1532 		 */
1533 		num_features++;
1534 
1535 		feat_num[QED_RDMA_CNQ] =
1536 			min_t(u32, RESC_NUM(p_hwfn, QED_SB) / num_features,
1537 			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
1538 	}
1539 
1540 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1541 						num_features,
1542 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
1543 
1544 	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
1545 	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
1546 	feat_num[QED_VF_L2_QUE] =
1547 	    min_t(u32,
1548 		  RESC_NUM(p_hwfn, QED_L2_QUEUE) -
1549 		  FEAT_NUM(p_hwfn, QED_PF_L2_QUE), sb_cnt_info.sb_iov_cnt);
1550 
1551 	DP_VERBOSE(p_hwfn,
1552 		   NETIF_MSG_PROBE,
1553 		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d #SBS=%d num_features=%d\n",
1554 		   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
1555 		   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
1556 		   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
1557 		   RESC_NUM(p_hwfn, QED_SB), num_features);
1558 }
1559 
1560 static enum resource_id_enum qed_hw_get_mfw_res_id(enum qed_resources res_id)
1561 {
1562 	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
1563 
1564 	switch (res_id) {
1565 	case QED_SB:
1566 		mfw_res_id = RESOURCE_NUM_SB_E;
1567 		break;
1568 	case QED_L2_QUEUE:
1569 		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
1570 		break;
1571 	case QED_VPORT:
1572 		mfw_res_id = RESOURCE_NUM_VPORT_E;
1573 		break;
1574 	case QED_RSS_ENG:
1575 		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
1576 		break;
1577 	case QED_PQ:
1578 		mfw_res_id = RESOURCE_NUM_PQ_E;
1579 		break;
1580 	case QED_RL:
1581 		mfw_res_id = RESOURCE_NUM_RL_E;
1582 		break;
1583 	case QED_MAC:
1584 	case QED_VLAN:
1585 		/* Each VFC resource can accommodate both a MAC and a VLAN */
1586 		mfw_res_id = RESOURCE_VFC_FILTER_E;
1587 		break;
1588 	case QED_ILT:
1589 		mfw_res_id = RESOURCE_ILT_E;
1590 		break;
1591 	case QED_LL2_QUEUE:
1592 		mfw_res_id = RESOURCE_LL2_QUEUE_E;
1593 		break;
1594 	case QED_RDMA_CNQ_RAM:
1595 	case QED_CMDQS_CQS:
1596 		/* CNQ/CMDQS are the same resource */
1597 		mfw_res_id = RESOURCE_CQS_E;
1598 		break;
1599 	case QED_RDMA_STATS_QUEUE:
1600 		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
1601 		break;
1602 	default:
1603 		break;
1604 	}
1605 
1606 	return mfw_res_id;
1607 }
1608 
1609 static u32 qed_hw_get_dflt_resc_num(struct qed_hwfn *p_hwfn,
1610 				    enum qed_resources res_id)
1611 {
1612 	u8 num_funcs = p_hwfn->num_funcs_on_engine;
1613 	struct qed_sb_cnt_info sb_cnt_info;
1614 	u32 dflt_resc_num = 0;
1615 
1616 	switch (res_id) {
1617 	case QED_SB:
1618 		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
1619 		qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
1620 		dflt_resc_num = sb_cnt_info.sb_cnt;
1621 		break;
1622 	case QED_L2_QUEUE:
1623 		dflt_resc_num = MAX_NUM_L2_QUEUES_BB / num_funcs;
1624 		break;
1625 	case QED_VPORT:
1626 		dflt_resc_num = MAX_NUM_VPORTS_BB / num_funcs;
1627 		break;
1628 	case QED_RSS_ENG:
1629 		dflt_resc_num = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1630 		break;
1631 	case QED_PQ:
1632 		/* The granularity of the PQs is 8 */
1633 		dflt_resc_num = MAX_QM_TX_QUEUES_BB / num_funcs;
1634 		dflt_resc_num &= ~0x7;
1635 		break;
1636 	case QED_RL:
1637 		dflt_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
1638 		break;
1639 	case QED_MAC:
1640 	case QED_VLAN:
1641 		/* Each VFC resource can accommodate both a MAC and a VLAN */
1642 		dflt_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
1643 		break;
1644 	case QED_ILT:
1645 		dflt_resc_num = PXP_NUM_ILT_RECORDS_BB / num_funcs;
1646 		break;
1647 	case QED_LL2_QUEUE:
1648 		dflt_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
1649 		break;
1650 	case QED_RDMA_CNQ_RAM:
1651 	case QED_CMDQS_CQS:
1652 		/* CNQ/CMDQS are the same resource */
1653 		dflt_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
1654 		break;
1655 	case QED_RDMA_STATS_QUEUE:
1656 		dflt_resc_num = RDMA_NUM_STATISTIC_COUNTERS_BB / num_funcs;
1657 		break;
1658 	default:
1659 		break;
1660 	}
1661 
1662 	return dflt_resc_num;
1663 }
1664 
1665 static const char *qed_hw_get_resc_name(enum qed_resources res_id)
1666 {
1667 	switch (res_id) {
1668 	case QED_SB:
1669 		return "SB";
1670 	case QED_L2_QUEUE:
1671 		return "L2_QUEUE";
1672 	case QED_VPORT:
1673 		return "VPORT";
1674 	case QED_RSS_ENG:
1675 		return "RSS_ENG";
1676 	case QED_PQ:
1677 		return "PQ";
1678 	case QED_RL:
1679 		return "RL";
1680 	case QED_MAC:
1681 		return "MAC";
1682 	case QED_VLAN:
1683 		return "VLAN";
1684 	case QED_RDMA_CNQ_RAM:
1685 		return "RDMA_CNQ_RAM";
1686 	case QED_ILT:
1687 		return "ILT";
1688 	case QED_LL2_QUEUE:
1689 		return "LL2_QUEUE";
1690 	case QED_CMDQS_CQS:
1691 		return "CMDQS_CQS";
1692 	case QED_RDMA_STATS_QUEUE:
1693 		return "RDMA_STATS_QUEUE";
1694 	default:
1695 		return "UNKNOWN_RESOURCE";
1696 	}
1697 }
1698 
1699 static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
1700 				enum qed_resources res_id)
1701 {
1702 	u32 dflt_resc_num = 0, dflt_resc_start = 0, mcp_resp, mcp_param;
1703 	u32 *p_resc_num, *p_resc_start;
1704 	struct resource_info resc_info;
1705 	int rc;
1706 
1707 	p_resc_num = &RESC_NUM(p_hwfn, res_id);
1708 	p_resc_start = &RESC_START(p_hwfn, res_id);
1709 
1710 	/* Default values assume that each function receives an equal share */
1711 	dflt_resc_num = qed_hw_get_dflt_resc_num(p_hwfn, res_id);
1712 	if (!dflt_resc_num) {
1713 		DP_ERR(p_hwfn,
1714 		       "Failed to get default amount for resource %d [%s]\n",
1715 		       res_id, qed_hw_get_resc_name(res_id));
1716 		return -EINVAL;
1717 	}
1718 	dflt_resc_start = dflt_resc_num * p_hwfn->enabled_func_idx;
1719 
1720 	memset(&resc_info, 0, sizeof(resc_info));
1721 	resc_info.res_id = qed_hw_get_mfw_res_id(res_id);
1722 	if (resc_info.res_id == RESOURCE_NUM_INVALID) {
1723 		DP_ERR(p_hwfn,
1724 		       "Failed to match resource %d [%s] with the MFW resources\n",
1725 		       res_id, qed_hw_get_resc_name(res_id));
1726 		return -EINVAL;
1727 	}
1728 
1729 	rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, &resc_info,
1730 				   &mcp_resp, &mcp_param);
1731 	if (rc) {
1732 		DP_NOTICE(p_hwfn,
1733 			  "MFW response failure for an allocation request for resource %d [%s]\n",
1734 			  res_id, qed_hw_get_resc_name(res_id));
1735 		return rc;
1736 	}
1737 
1738 	/* Default driver values are applied in the following cases:
1739 	 * - The resource allocation MB command is not supported by the MFW
1740 	 * - There is an internal error in the MFW while processing the request
1741 	 * - The resource ID is unknown to the MFW
1742 	 */
1743 	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK &&
1744 	    mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED) {
1745 		DP_NOTICE(p_hwfn,
1746 			  "Resource %d [%s]: No allocation info was received [mcp_resp 0x%x]. Applying default values [num %d, start %d].\n",
1747 			  res_id,
1748 			  qed_hw_get_resc_name(res_id),
1749 			  mcp_resp, dflt_resc_num, dflt_resc_start);
1750 		*p_resc_num = dflt_resc_num;
1751 		*p_resc_start = dflt_resc_start;
1752 		goto out;
1753 	}
1754 
1755 	/* Special handling for status blocks; will be revised in the future */
1756 	if (res_id == QED_SB) {
1757 		resc_info.size -= 1;
1758 		resc_info.offset -= p_hwfn->enabled_func_idx;
1759 	}
1760 
1761 	*p_resc_num = resc_info.size;
1762 	*p_resc_start = resc_info.offset;
1763 
1764 out:
1765 	/* PQs have to be aligned to 8 [that's the HW granularity].
1766 	 * Reduce the number so it fits.
1767 	 */
1768 	if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
1769 		DP_INFO(p_hwfn,
1770 			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
1771 			*p_resc_num,
1772 			(*p_resc_num) & ~0x7,
1773 			*p_resc_start, (*p_resc_start) & ~0x7);
1774 		*p_resc_num &= ~0x7;
1775 		*p_resc_start &= ~0x7;
1776 	}
1777 
1778 	return 0;
1779 }
1780 
1781 static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1782 {
1783 	u8 res_id;
1784 	int rc;
1785 
1786 	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
1787 		rc = qed_hw_set_resc_info(p_hwfn, res_id);
1788 		if (rc)
1789 			return rc;
1790 	}
1791 
1792 	/* Sanity for ILT */
1793 	if ((RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB)) {
1794 		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
1795 			  RESC_START(p_hwfn, QED_ILT),
1796 			  RESC_END(p_hwfn, QED_ILT) - 1);
1797 		return -EINVAL;
1798 	}
1799 
1800 	qed_hw_set_feat(p_hwfn);
1801 
1802 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1803 		   "The numbers for each resource are:\n");
1804 	for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
1805 		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
1806 			   qed_hw_get_resc_name(res_id),
1807 			   RESC_NUM(p_hwfn, res_id),
1808 			   RESC_START(p_hwfn, res_id));
1809 
1810 	return 0;
1811 }
1812 
1813 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1814 {
1815 	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
1816 	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1817 	struct qed_mcp_link_params *link;
1818 
1819 	/* Read global nvm_cfg address */
1820 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1821 
1822 	/* Verify MCP has initialized it */
1823 	if (!nvm_cfg_addr) {
1824 		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1825 		return -EINVAL;
1826 	}
1827 
	/* Read nvm_cfg1 (Note: this is just the offset, not the offsize (TBD)) */
1829 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1830 
1831 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1832 	       offsetof(struct nvm_cfg1, glob) +
1833 	       offsetof(struct nvm_cfg1_glob, core_cfg);
1834 
1835 	core_cfg = qed_rd(p_hwfn, p_ptt, addr);
1836 
1837 	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1838 		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1839 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
1840 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1841 		break;
1842 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
1843 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1844 		break;
1845 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
1846 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1847 		break;
1848 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
1849 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1850 		break;
1851 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
1852 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1853 		break;
1854 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
1855 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1856 		break;
1857 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
1858 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1859 		break;
1860 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
1861 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1862 		break;
1863 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
1864 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1865 		break;
1866 	default:
1867 		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
1868 		break;
1869 	}
1870 
1871 	/* Read default link configuration */
1872 	link = &p_hwfn->mcp_info->link_input;
1873 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1874 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1875 	link_temp = qed_rd(p_hwfn, p_ptt,
1876 			   port_cfg_addr +
1877 			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
1878 	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;
	p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
1883 
1884 	link_temp = qed_rd(p_hwfn, p_ptt,
1885 			   port_cfg_addr +
1886 			   offsetof(struct nvm_cfg1_port, link_settings));
1887 	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1888 		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1889 	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1890 		link->speed.autoneg = true;
1891 		break;
1892 	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1893 		link->speed.forced_speed = 1000;
1894 		break;
1895 	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1896 		link->speed.forced_speed = 10000;
1897 		break;
1898 	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1899 		link->speed.forced_speed = 25000;
1900 		break;
1901 	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1902 		link->speed.forced_speed = 40000;
1903 		break;
1904 	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1905 		link->speed.forced_speed = 50000;
1906 		break;
1907 	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
1908 		link->speed.forced_speed = 100000;
1909 		break;
1910 	default:
1911 		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
1912 	}
1913 
1914 	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1915 	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1916 	link->pause.autoneg = !!(link_temp &
1917 				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1918 	link->pause.forced_rx = !!(link_temp &
1919 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1920 	link->pause.forced_tx = !!(link_temp &
1921 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1922 	link->loopback_mode = 0;
1923 
1924 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1925 		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
1926 		   link->speed.forced_speed, link->speed.advertised_speeds,
1927 		   link->speed.autoneg, link->pause.autoneg);
1928 
1929 	/* Read Multi-function information from shmem */
1930 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1931 	       offsetof(struct nvm_cfg1, glob) +
1932 	       offsetof(struct nvm_cfg1_glob, generic_cont0);
1933 
1934 	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
1935 
1936 	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1937 		  NVM_CFG1_GLOB_MF_MODE_OFFSET;
1938 
1939 	switch (mf_mode) {
1940 	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
1941 		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
1942 		break;
1943 	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
1944 		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
1945 		break;
1946 	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1947 		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
1948 		break;
1949 	}
1950 	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1951 		p_hwfn->cdev->mf_mode);
1952 
	/* Read device capabilities information from shmem */
1954 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1955 		offsetof(struct nvm_cfg1, glob) +
1956 		offsetof(struct nvm_cfg1_glob, device_capabilities);
1957 
1958 	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
1959 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1960 		__set_bit(QED_DEV_CAP_ETH,
1961 			  &p_hwfn->hw_info.device_capabilities);
1962 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
1963 		__set_bit(QED_DEV_CAP_FCOE,
1964 			  &p_hwfn->hw_info.device_capabilities);
1965 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
1966 		__set_bit(QED_DEV_CAP_ISCSI,
1967 			  &p_hwfn->hw_info.device_capabilities);
1968 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
1969 		__set_bit(QED_DEV_CAP_ROCE,
1970 			  &p_hwfn->hw_info.device_capabilities);
1971 
1972 	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1973 }
1974 
1975 static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1976 {
1977 	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
1978 	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
1979 
1980 	num_funcs = MAX_NUM_PFS_BB;
1981 
1982 	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
1983 	 * in the other bits are selected.
1984 	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and is
	 * enabled).
1987 	 * In case of CMT, only the "even" functions are enabled, and thus the
1988 	 * number of functions for both hwfns is learnt from the same bits.
1989 	 */
1990 	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
1991 
1992 	if (reg_function_hide & 0x1) {
1993 		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
1994 			num_funcs = 0;
1995 			eng_mask = 0xaaaa;
1996 		} else {
1997 			num_funcs = 1;
1998 			eng_mask = 0x5554;
1999 		}
2000 
2001 		/* Get the number of the enabled functions on the engine */
2002 		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
2003 		while (tmp) {
2004 			if (tmp & 0x1)
2005 				num_funcs++;
2006 			tmp >>= 0x1;
2007 		}
2008 
2009 		/* Get the PF index within the enabled functions */
2010 		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
2011 		tmp = reg_function_hide & eng_mask & low_pfs_mask;
2012 		while (tmp) {
2013 			if (tmp & 0x1)
2014 				enabled_func_idx--;
2015 			tmp >>= 0x1;
2016 		}
2017 	}
2018 
2019 	p_hwfn->num_funcs_on_engine = num_funcs;
2020 	p_hwfn->enabled_func_idx = enabled_func_idx;
2021 
2022 	DP_VERBOSE(p_hwfn,
2023 		   NETIF_MSG_PROBE,
2024 		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
2025 		   p_hwfn->rel_pf_id,
2026 		   p_hwfn->abs_pf_id,
2027 		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
2028 }
2029 
2030 static int
2031 qed_get_hw_info(struct qed_hwfn *p_hwfn,
2032 		struct qed_ptt *p_ptt,
2033 		enum qed_pci_personality personality)
2034 {
2035 	u32 port_mode;
2036 	int rc;
2037 
	/* Since all information is common, only the first hwfn should do this */
2039 	if (IS_LEAD_HWFN(p_hwfn)) {
2040 		rc = qed_iov_hw_info(p_hwfn);
2041 		if (rc)
2042 			return rc;
2043 	}
2044 
2045 	/* Read the port mode */
2046 	port_mode = qed_rd(p_hwfn, p_ptt,
2047 			   CNIG_REG_NW_PORT_MODE_BB_B0);
2048 
2049 	if (port_mode < 3) {
2050 		p_hwfn->cdev->num_ports_in_engines = 1;
2051 	} else if (port_mode <= 5) {
2052 		p_hwfn->cdev->num_ports_in_engines = 2;
2053 	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", port_mode);

		/* Default to a single port per engine */
		p_hwfn->cdev->num_ports_in_engines = 1;
2059 	}
2060 
	rc = qed_hw_get_nvm_info(p_hwfn, p_ptt);
	if (rc)
		return rc;
2062 
2063 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
2064 	if (rc)
2065 		return rc;
2066 
2067 	if (qed_mcp_is_init(p_hwfn))
2068 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
2069 				p_hwfn->mcp_info->func_info.mac);
2070 	else
2071 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
2072 
2073 	if (qed_mcp_is_init(p_hwfn)) {
2074 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
2075 			p_hwfn->hw_info.ovlan =
2076 				p_hwfn->mcp_info->func_info.ovlan;
2077 
2078 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
2079 	}
2080 
2081 	if (qed_mcp_is_init(p_hwfn)) {
2082 		enum qed_pci_personality protocol;
2083 
2084 		protocol = p_hwfn->mcp_info->func_info.protocol;
2085 		p_hwfn->hw_info.personality = protocol;
2086 	}
2087 
2088 	qed_get_num_funcs(p_hwfn, p_ptt);
2089 
2090 	if (qed_mcp_is_init(p_hwfn))
2091 		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
2092 
2093 	return qed_hw_get_resc(p_hwfn);
2094 }
2095 
2096 static int qed_get_dev_info(struct qed_dev *cdev)
2097 {
2098 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2099 	u32 tmp;
2100 
2101 	/* Read Vendor Id / Device Id */
2102 	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
2103 	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
2104 
2105 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
2106 				     MISCS_REG_CHIP_NUM);
2107 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
2108 				     MISCS_REG_CHIP_REV);
2109 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
2110 
2111 	cdev->type = QED_DEV_TYPE_BB;
2112 	/* Learn number of HW-functions */
2113 	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
2114 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
2115 
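	/* If the pair bit for this PF is set, the device operates in CMT mode
	 * and exposes two hw-functions; otherwise a single hwfn is used.
	 */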
2116 	if (tmp & (1 << p_hwfn->rel_pf_id)) {
2117 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
2118 		cdev->num_hwfns = 2;
2119 	} else {
2120 		cdev->num_hwfns = 1;
2121 	}
2122 
2123 	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
2124 				    MISCS_REG_CHIP_TEST_REG) >> 4;
2125 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
2126 	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
2127 				       MISCS_REG_CHIP_METAL);
2128 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
2129 
2130 	DP_INFO(cdev->hwfns,
2131 		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
2132 		cdev->chip_num, cdev->chip_rev,
2133 		cdev->chip_bond_id, cdev->chip_metal);
2134 
2135 	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
2136 		DP_NOTICE(cdev->hwfns,
2137 			  "The chip type/rev (BB A0) is not supported!\n");
2138 		return -EINVAL;
2139 	}
2140 
2141 	return 0;
2142 }
2143 
2144 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
2145 				 void __iomem *p_regview,
2146 				 void __iomem *p_doorbells,
2147 				 enum qed_pci_personality personality)
2148 {
2149 	int rc = 0;
2150 
2151 	/* Split PCI bars evenly between hwfns */
2152 	p_hwfn->regview = p_regview;
2153 	p_hwfn->doorbells = p_doorbells;
2154 
2155 	if (IS_VF(p_hwfn->cdev))
2156 		return qed_vf_hw_prepare(p_hwfn);
2157 
2158 	/* Validate that chip access is feasible */
2159 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
2160 		DP_ERR(p_hwfn,
2161 		       "Reading the ME register returns all Fs; Preventing further chip access\n");
2162 		return -EINVAL;
2163 	}
2164 
2165 	get_function_id(p_hwfn);
2166 
2167 	/* Allocate PTT pool */
2168 	rc = qed_ptt_pool_alloc(p_hwfn);
2169 	if (rc)
2170 		goto err0;
2171 
2172 	/* Allocate the main PTT */
2173 	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
2174 
2175 	/* First hwfn learns basic information, e.g., number of hwfns */
2176 	if (!p_hwfn->my_id) {
2177 		rc = qed_get_dev_info(p_hwfn->cdev);
2178 		if (rc)
2179 			goto err1;
2180 	}
2181 
2182 	qed_hw_hwfn_prepare(p_hwfn);
2183 
2184 	/* Initialize MCP structure */
2185 	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
2186 	if (rc) {
2187 		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
2188 		goto err1;
2189 	}
2190 
2191 	/* Read the device configuration information from the HW and SHMEM */
2192 	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
2193 	if (rc) {
2194 		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
2195 		goto err2;
2196 	}
2197 
2198 	/* Allocate the init RT array and initialize the init-ops engine */
2199 	rc = qed_init_alloc(p_hwfn);
2200 	if (rc)
2201 		goto err2;
2202 
2203 	return rc;
2204 err2:
2205 	if (IS_LEAD_HWFN(p_hwfn))
2206 		qed_iov_free_hw_info(p_hwfn->cdev);
2207 	qed_mcp_free(p_hwfn);
2208 err1:
2209 	qed_hw_hwfn_free(p_hwfn);
2210 err0:
2211 	return rc;
2212 }
2213 
2214 int qed_hw_prepare(struct qed_dev *cdev,
2215 		   int personality)
2216 {
2217 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2218 	int rc;
2219 
2220 	/* Store the precompiled init data ptrs */
2221 	if (IS_PF(cdev))
2222 		qed_init_iro_array(cdev);
2223 
2224 	/* Initialize the first hwfn - will learn number of hwfns */
2225 	rc = qed_hw_prepare_single(p_hwfn,
2226 				   cdev->regview,
2227 				   cdev->doorbells, personality);
2228 	if (rc)
2229 		return rc;
2230 
2231 	personality = p_hwfn->hw_info.personality;
2232 
2233 	/* Initialize the rest of the hwfns */
2234 	if (cdev->num_hwfns > 1) {
2235 		void __iomem *p_regview, *p_doorbell;
2236 		u8 __iomem *addr;
2237 
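		/* Each BAR is split evenly between the hwfns, so the second
		 * hwfn's view starts half-way into the BAR; e.g., a 512kB
		 * doorbell BAR places the second hwfn's doorbells at 256kB.
		 */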
2238 		/* adjust bar offset for second engine */
2239 		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
2240 		p_regview = addr;
2241 
2242 		/* adjust doorbell bar offset for second engine */
2243 		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
2244 		p_doorbell = addr;
2245 
2246 		/* prepare second hw function */
2247 		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
2248 					   p_doorbell, personality);
2249 
		/* in case of error, need to free the previously
		 * initialized hwfn 0.
		 */
2253 		if (rc) {
2254 			if (IS_PF(cdev)) {
2255 				qed_init_free(p_hwfn);
2256 				qed_mcp_free(p_hwfn);
2257 				qed_hw_hwfn_free(p_hwfn);
2258 			}
2259 		}
2260 	}
2261 
2262 	return rc;
2263 }
2264 
2265 void qed_hw_remove(struct qed_dev *cdev)
2266 {
2267 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2268 	int i;
2269 
2270 	if (IS_PF(cdev))
2271 		qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
2272 					       QED_OV_DRIVER_STATE_NOT_LOADED);
2273 
2274 	for_each_hwfn(cdev, i) {
2275 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2276 
2277 		if (IS_VF(cdev)) {
2278 			qed_vf_pf_release(p_hwfn);
2279 			continue;
2280 		}
2281 
2282 		qed_init_free(p_hwfn);
2283 		qed_hw_hwfn_free(p_hwfn);
2284 		qed_mcp_free(p_hwfn);
2285 	}
2286 
2287 	qed_iov_free_hw_info(cdev);
2288 }
2289 
2290 static void qed_chain_free_next_ptr(struct qed_dev *cdev,
2291 				    struct qed_chain *p_chain)
2292 {
2293 	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
2294 	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
2295 	struct qed_chain_next *p_next;
2296 	u32 size, i;
2297 
2298 	if (!p_virt)
2299 		return;
2300 
2301 	size = p_chain->elem_size * p_chain->usable_per_page;
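	/* 'size' is the offset of the qed_chain_next element within each
	 * page, i.e., it immediately follows the usable elements.
	 */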
2302 
2303 	for (i = 0; i < p_chain->page_cnt; i++) {
2304 		if (!p_virt)
2305 			break;
2306 
2307 		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
2308 		p_virt_next = p_next->next_virt;
2309 		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
2310 
2311 		dma_free_coherent(&cdev->pdev->dev,
2312 				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
2313 
2314 		p_virt = p_virt_next;
2315 		p_phys = p_phys_next;
2316 	}
2317 }
2318 
2319 static void qed_chain_free_single(struct qed_dev *cdev,
2320 				  struct qed_chain *p_chain)
2321 {
2322 	if (!p_chain->p_virt_addr)
2323 		return;
2324 
2325 	dma_free_coherent(&cdev->pdev->dev,
2326 			  QED_CHAIN_PAGE_SIZE,
2327 			  p_chain->p_virt_addr, p_chain->p_phys_addr);
2328 }
2329 
2330 static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
2331 {
2332 	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
2333 	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
2334 	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
2335 
2336 	if (!pp_virt_addr_tbl)
2337 		return;
2338 
2339 	if (!p_pbl_virt)
2340 		goto out;
2341 
2342 	for (i = 0; i < page_cnt; i++) {
2343 		if (!pp_virt_addr_tbl[i])
2344 			break;
2345 
2346 		dma_free_coherent(&cdev->pdev->dev,
2347 				  QED_CHAIN_PAGE_SIZE,
2348 				  pp_virt_addr_tbl[i],
2349 				  *(dma_addr_t *)p_pbl_virt);
2350 
2351 		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
2352 	}
2353 
2354 	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
2355 	dma_free_coherent(&cdev->pdev->dev,
2356 			  pbl_size,
2357 			  p_chain->pbl_sp.p_virt_table,
2358 			  p_chain->pbl_sp.p_phys_table);
2359 out:
2360 	vfree(p_chain->pbl.pp_virt_addr_tbl);
2361 }
2362 
2363 void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
2364 {
2365 	switch (p_chain->mode) {
2366 	case QED_CHAIN_MODE_NEXT_PTR:
2367 		qed_chain_free_next_ptr(cdev, p_chain);
2368 		break;
2369 	case QED_CHAIN_MODE_SINGLE:
2370 		qed_chain_free_single(cdev, p_chain);
2371 		break;
2372 	case QED_CHAIN_MODE_PBL:
2373 		qed_chain_free_pbl(cdev, p_chain);
2374 		break;
2375 	}
2376 }
2377 
2378 static int
2379 qed_chain_alloc_sanity_check(struct qed_dev *cdev,
2380 			     enum qed_chain_cnt_type cnt_type,
2381 			     size_t elem_size, u32 page_cnt)
2382 {
2383 	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
2384 
	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of a u32 type.
	 */
2391 	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
2392 	     chain_size > 0x10000) ||
2393 	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
2394 	     chain_size > 0x100000000ULL)) {
2395 		DP_NOTICE(cdev,
2396 			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
2397 			  chain_size);
2398 		return -EINVAL;
2399 	}
2400 
2401 	return 0;
2402 }
2403 
2404 static int
2405 qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
2406 {
2407 	void *p_virt = NULL, *p_virt_prev = NULL;
2408 	dma_addr_t p_phys = 0;
2409 	u32 i;
2410 
2411 	for (i = 0; i < p_chain->page_cnt; i++) {
2412 		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
2413 					    QED_CHAIN_PAGE_SIZE,
2414 					    &p_phys, GFP_KERNEL);
2415 		if (!p_virt)
2416 			return -ENOMEM;
2417 
2418 		if (i == 0) {
2419 			qed_chain_init_mem(p_chain, p_virt, p_phys);
2420 			qed_chain_reset(p_chain);
2421 		} else {
2422 			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
2423 						     p_virt, p_phys);
2424 		}
2425 
2426 		p_virt_prev = p_virt;
2427 	}
2428 	/* Last page's next element should point to the beginning of the
2429 	 * chain.
2430 	 */
2431 	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
2432 				     p_chain->p_virt_addr,
2433 				     p_chain->p_phys_addr);
2434 
2435 	return 0;
2436 }
2437 
2438 static int
2439 qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
2440 {
2441 	dma_addr_t p_phys = 0;
2442 	void *p_virt = NULL;
2443 
2444 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
2445 				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
2446 	if (!p_virt)
2447 		return -ENOMEM;
2448 
2449 	qed_chain_init_mem(p_chain, p_virt, p_phys);
2450 	qed_chain_reset(p_chain);
2451 
2452 	return 0;
2453 }
2454 
2455 static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
2456 {
2457 	u32 page_cnt = p_chain->page_cnt, size, i;
2458 	dma_addr_t p_phys = 0, p_pbl_phys = 0;
2459 	void **pp_virt_addr_tbl = NULL;
2460 	u8 *p_pbl_virt = NULL;
2461 	void *p_virt = NULL;
2462 
2463 	size = page_cnt * sizeof(*pp_virt_addr_tbl);
2464 	pp_virt_addr_tbl = vzalloc(size);
2465 	if (!pp_virt_addr_tbl)
2466 		return -ENOMEM;
2467 
	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be contiguous.
	 * qed_chain_init_pbl_mem() is called even in case the allocation
	 * fails, since pp_virt_addr_tbl was previously allocated and must be
	 * saved to allow its freeing during the error flow.
	 */
2474 	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
2475 	p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
2476 					size, &p_pbl_phys, GFP_KERNEL);
2477 	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
2478 			       pp_virt_addr_tbl);
2479 	if (!p_pbl_virt)
2480 		return -ENOMEM;
2481 
2482 	for (i = 0; i < page_cnt; i++) {
2483 		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
2484 					    QED_CHAIN_PAGE_SIZE,
2485 					    &p_phys, GFP_KERNEL);
2486 		if (!p_virt)
2487 			return -ENOMEM;
2488 
2489 		if (i == 0) {
2490 			qed_chain_init_mem(p_chain, p_virt, p_phys);
2491 			qed_chain_reset(p_chain);
2492 		}
2493 
2494 		/* Fill the PBL table with the physical address of the page */
2495 		*(dma_addr_t *)p_pbl_virt = p_phys;
2496 		/* Keep the virtual address of the page */
2497 		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
2498 
2499 		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
2500 	}
2501 
2502 	return 0;
2503 }
2504 
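/* For example (illustrative only), an Rx descriptor ring could be requested
 * as a PBL chain with 16-bit producer/consumer counters:
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, QED_CHAIN_CNT_TYPE_U16,
 *			     num_elems, elem_size, &chain);
 */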
2505 int qed_chain_alloc(struct qed_dev *cdev,
2506 		    enum qed_chain_use_mode intended_use,
2507 		    enum qed_chain_mode mode,
2508 		    enum qed_chain_cnt_type cnt_type,
2509 		    u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
2510 {
2511 	u32 page_cnt;
2512 	int rc = 0;
2513 
2514 	if (mode == QED_CHAIN_MODE_SINGLE)
2515 		page_cnt = 1;
2516 	else
2517 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
2518 
2519 	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
2520 	if (rc) {
2521 		DP_NOTICE(cdev,
2522 			  "Cannot allocate a chain with the given arguments:\n");
2523 		DP_NOTICE(cdev,
2524 			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
2525 			  intended_use, mode, cnt_type, num_elems, elem_size);
2526 		return rc;
2527 	}
2528 
2529 	qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
2530 			      mode, cnt_type);
2531 
2532 	switch (mode) {
2533 	case QED_CHAIN_MODE_NEXT_PTR:
2534 		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
2535 		break;
2536 	case QED_CHAIN_MODE_SINGLE:
2537 		rc = qed_chain_alloc_single(cdev, p_chain);
2538 		break;
2539 	case QED_CHAIN_MODE_PBL:
2540 		rc = qed_chain_alloc_pbl(cdev, p_chain);
2541 		break;
2542 	}
2543 	if (rc)
2544 		goto nomem;
2545 
2546 	return 0;
2547 
2548 nomem:
2549 	qed_chain_free(cdev, p_chain);
2550 	return rc;
2551 }
2552 
2553 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
2554 {
2555 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
2556 		u16 min, max;
2557 
2558 		min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
2559 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
2560 		DP_NOTICE(p_hwfn,
2561 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
2562 			  src_id, min, max);
2563 
2564 		return -EINVAL;
2565 	}
2566 
2567 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
2568 
2569 	return 0;
2570 }
2571 
2572 int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
2573 {
2574 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
2575 		u8 min, max;
2576 
2577 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
2578 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
2579 		DP_NOTICE(p_hwfn,
2580 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
2581 			  src_id, min, max);
2582 
2583 		return -EINVAL;
2584 	}
2585 
2586 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
2587 
2588 	return 0;
2589 }
2590 
2591 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
2592 {
2593 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
2594 		u8 min, max;
2595 
2596 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
2597 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
2598 		DP_NOTICE(p_hwfn,
2599 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
2600 			  src_id, min, max);
2601 
2602 		return -EINVAL;
2603 	}
2604 
2605 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
2606 
2607 	return 0;
2608 }
2609 
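/* Pack a MAC address into the two 32-bit LLH filter value words: the two
 * most significant bytes form the high word and the remaining four bytes the
 * low word. E.g., aa:bb:cc:dd:ee:ff yields high 0x0000aabb, low 0xccddeeff.
 */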
2610 static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
2611 				  u8 *p_filter)
2612 {
2613 	*p_high = p_filter[1] | (p_filter[0] << 8);
2614 	*p_low = p_filter[5] | (p_filter[4] << 8) |
2615 		 (p_filter[3] << 16) | (p_filter[2] << 24);
2616 }
2617 
2618 int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
2619 			   struct qed_ptt *p_ptt, u8 *p_filter)
2620 {
2621 	u32 high = 0, low = 0, en;
2622 	int i;
2623 
2624 	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2625 		return 0;
2626 
2627 	qed_llh_mac_to_filter(&high, &low, p_filter);
2628 
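	/* Each filter entry spans two consecutive 32-bit VALUE registers (low
	 * word first) plus per-entry MODE, PROTOCOL_TYPE and EN registers.
	 */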
2629 	/* Find a free entry and utilize it */
2630 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2631 		en = qed_rd(p_hwfn, p_ptt,
2632 			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
2633 		if (en)
2634 			continue;
2635 		qed_wr(p_hwfn, p_ptt,
2636 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
2637 		       2 * i * sizeof(u32), low);
2638 		qed_wr(p_hwfn, p_ptt,
2639 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
2640 		       (2 * i + 1) * sizeof(u32), high);
2641 		qed_wr(p_hwfn, p_ptt,
2642 		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
2643 		qed_wr(p_hwfn, p_ptt,
2644 		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2645 		       i * sizeof(u32), 0);
2646 		qed_wr(p_hwfn, p_ptt,
2647 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
2648 		break;
2649 	}
2650 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
2651 		DP_NOTICE(p_hwfn,
2652 			  "Failed to find an empty LLH filter to utilize\n");
2653 		return -EINVAL;
2654 	}
2655 
2656 	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2657 		   "mac: %pM is added at %d\n",
2658 		   p_filter, i);
2659 
2660 	return 0;
2661 }
2662 
2663 void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
2664 			       struct qed_ptt *p_ptt, u8 *p_filter)
2665 {
2666 	u32 high = 0, low = 0;
2667 	int i;
2668 
2669 	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2670 		return;
2671 
2672 	qed_llh_mac_to_filter(&high, &low, p_filter);
2673 
2674 	/* Find the entry and clean it */
2675 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2676 		if (qed_rd(p_hwfn, p_ptt,
2677 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
2678 			   2 * i * sizeof(u32)) != low)
2679 			continue;
2680 		if (qed_rd(p_hwfn, p_ptt,
2681 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
2682 			   (2 * i + 1) * sizeof(u32)) != high)
2683 			continue;
2684 
2685 		qed_wr(p_hwfn, p_ptt,
2686 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
2687 		qed_wr(p_hwfn, p_ptt,
2688 		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
2689 		qed_wr(p_hwfn, p_ptt,
2690 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
2691 		       (2 * i + 1) * sizeof(u32), 0);
2692 
2693 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2694 			   "mac: %pM is removed from %d\n",
2695 			   p_filter, i);
2696 		break;
2697 	}
2698 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
2699 		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
2700 }
2701 
2702 int
2703 qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
2704 			    struct qed_ptt *p_ptt,
2705 			    u16 source_port_or_eth_type,
2706 			    u16 dest_port, enum qed_llh_port_filter_type_t type)
2707 {
2708 	u32 high = 0, low = 0, en;
2709 	int i;
2710 
2711 	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2712 		return 0;
2713 
2714 	switch (type) {
2715 	case QED_LLH_FILTER_ETHERTYPE:
2716 		high = source_port_or_eth_type;
2717 		break;
2718 	case QED_LLH_FILTER_TCP_SRC_PORT:
2719 	case QED_LLH_FILTER_UDP_SRC_PORT:
2720 		low = source_port_or_eth_type << 16;
2721 		break;
2722 	case QED_LLH_FILTER_TCP_DEST_PORT:
2723 	case QED_LLH_FILTER_UDP_DEST_PORT:
2724 		low = dest_port;
2725 		break;
2726 	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
2727 	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
2728 		low = (source_port_or_eth_type << 16) | dest_port;
2729 		break;
2730 	default:
2731 		DP_NOTICE(p_hwfn,
2732 			  "Non valid LLH protocol filter type %d\n", type);
2733 		return -EINVAL;
2734 	}
2735 	/* Find a free entry and utilize it */
2736 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2737 		en = qed_rd(p_hwfn, p_ptt,
2738 			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
2739 		if (en)
2740 			continue;
2741 		qed_wr(p_hwfn, p_ptt,
2742 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
2743 		       2 * i * sizeof(u32), low);
2744 		qed_wr(p_hwfn, p_ptt,
2745 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
2746 		       (2 * i + 1) * sizeof(u32), high);
2747 		qed_wr(p_hwfn, p_ptt,
2748 		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
2749 		qed_wr(p_hwfn, p_ptt,
2750 		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2751 		       i * sizeof(u32), 1 << type);
2752 		qed_wr(p_hwfn, p_ptt,
2753 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
2754 		break;
2755 	}
2756 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
2757 		DP_NOTICE(p_hwfn,
2758 			  "Failed to find an empty LLH filter to utilize\n");
2759 		return -EINVAL;
2760 	}
2761 	switch (type) {
2762 	case QED_LLH_FILTER_ETHERTYPE:
2763 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2764 			   "ETH type %x is added at %d\n",
2765 			   source_port_or_eth_type, i);
2766 		break;
2767 	case QED_LLH_FILTER_TCP_SRC_PORT:
2768 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2769 			   "TCP src port %x is added at %d\n",
2770 			   source_port_or_eth_type, i);
2771 		break;
2772 	case QED_LLH_FILTER_UDP_SRC_PORT:
2773 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2774 			   "UDP src port %x is added at %d\n",
2775 			   source_port_or_eth_type, i);
2776 		break;
2777 	case QED_LLH_FILTER_TCP_DEST_PORT:
2778 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2779 			   "TCP dst port %x is added at %d\n", dest_port, i);
2780 		break;
2781 	case QED_LLH_FILTER_UDP_DEST_PORT:
2782 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2783 			   "UDP dst port %x is added at %d\n", dest_port, i);
2784 		break;
2785 	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
2786 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2787 			   "TCP src/dst ports %x/%x are added at %d\n",
2788 			   source_port_or_eth_type, dest_port, i);
2789 		break;
2790 	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
2791 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
2792 			   "UDP src/dst ports %x/%x are added at %d\n",
2793 			   source_port_or_eth_type, dest_port, i);
2794 		break;
2795 	}
2796 	return 0;
2797 }
2798 
2799 void
2800 qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
2801 			       struct qed_ptt *p_ptt,
2802 			       u16 source_port_or_eth_type,
2803 			       u16 dest_port,
2804 			       enum qed_llh_port_filter_type_t type)
2805 {
2806 	u32 high = 0, low = 0;
2807 	int i;
2808 
2809 	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2810 		return;
2811 
2812 	switch (type) {
2813 	case QED_LLH_FILTER_ETHERTYPE:
2814 		high = source_port_or_eth_type;
2815 		break;
2816 	case QED_LLH_FILTER_TCP_SRC_PORT:
2817 	case QED_LLH_FILTER_UDP_SRC_PORT:
2818 		low = source_port_or_eth_type << 16;
2819 		break;
2820 	case QED_LLH_FILTER_TCP_DEST_PORT:
2821 	case QED_LLH_FILTER_UDP_DEST_PORT:
2822 		low = dest_port;
2823 		break;
2824 	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
2825 	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
2826 		low = (source_port_or_eth_type << 16) | dest_port;
2827 		break;
2828 	default:
2829 		DP_NOTICE(p_hwfn,
2830 			  "Non valid LLH protocol filter type %d\n", type);
2831 		return;
2832 	}
2833 
2834 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2835 		if (!qed_rd(p_hwfn, p_ptt,
2836 			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
2837 			continue;
2838 		if (!qed_rd(p_hwfn, p_ptt,
2839 			    NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
2840 			continue;
2841 		if (!(qed_rd(p_hwfn, p_ptt,
2842 			     NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2843 			     i * sizeof(u32)) & BIT(type)))
2844 			continue;
2845 		if (qed_rd(p_hwfn, p_ptt,
2846 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
2847 			   2 * i * sizeof(u32)) != low)
2848 			continue;
2849 		if (qed_rd(p_hwfn, p_ptt,
2850 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
2851 			   (2 * i + 1) * sizeof(u32)) != high)
2852 			continue;
2853 
2854 		qed_wr(p_hwfn, p_ptt,
2855 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
2856 		qed_wr(p_hwfn, p_ptt,
2857 		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
2858 		qed_wr(p_hwfn, p_ptt,
2859 		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2860 		       i * sizeof(u32), 0);
2861 		qed_wr(p_hwfn, p_ptt,
2862 		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
2863 		qed_wr(p_hwfn, p_ptt,
2864 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
2865 		       (2 * i + 1) * sizeof(u32), 0);
2866 		break;
2867 	}
2868 
2869 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
2870 		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
2871 }
2872 
2873 static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2874 			    u32 hw_addr, void *p_eth_qzone,
2875 			    size_t eth_qzone_size, u8 timeset)
2876 {
2877 	struct coalescing_timeset *p_coal_timeset;
2878 
2879 	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
2880 		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
2881 		return -EINVAL;
2882 	}
2883 
2884 	p_coal_timeset = p_eth_qzone;
2885 	memset(p_coal_timeset, 0, eth_qzone_size);
2886 	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
2887 	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
2888 	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
2889 
2890 	return 0;
2891 }
2892 
2893 int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2894 			 u16 coalesce, u8 qid, u16 sb_id)
2895 {
2896 	struct ustorm_eth_queue_zone eth_qzone;
2897 	u8 timeset, timer_res;
2898 	u16 fw_qid = 0;
2899 	u32 address;
2900 	int rc;
2901 
	/* Coalesce = (timeset << timer-resolution); timeset is 7 bits wide */
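	/* E.g., a requested coalescing of 200 usec exceeds 0x7F, so
	 * timer_res = 1 and timeset = 200 >> 1 = 100.
	 */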
2903 	if (coalesce <= 0x7F) {
2904 		timer_res = 0;
2905 	} else if (coalesce <= 0xFF) {
2906 		timer_res = 1;
2907 	} else if (coalesce <= 0x1FF) {
2908 		timer_res = 2;
2909 	} else {
2910 		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
2911 		return -EINVAL;
2912 	}
2913 	timeset = (u8)(coalesce >> timer_res);
2914 
2915 	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
2916 	if (rc)
2917 		return rc;
2918 
2919 	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
2920 	if (rc)
2921 		goto out;
2922 
2923 	address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
2924 
2925 	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
2926 			      sizeof(struct ustorm_eth_queue_zone), timeset);
2927 	if (rc)
2928 		goto out;
2929 
2930 	p_hwfn->cdev->rx_coalesce_usecs = coalesce;
2931 out:
2932 	return rc;
2933 }
2934 
2935 int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
2936 			 u16 coalesce, u8 qid, u16 sb_id)
2937 {
2938 	struct xstorm_eth_queue_zone eth_qzone;
2939 	u8 timeset, timer_res;
2940 	u16 fw_qid = 0;
2941 	u32 address;
2942 	int rc;
2943 
	/* Coalesce = (timeset << timer-resolution); timeset is 7 bits wide */
2945 	if (coalesce <= 0x7F) {
2946 		timer_res = 0;
2947 	} else if (coalesce <= 0xFF) {
2948 		timer_res = 1;
2949 	} else if (coalesce <= 0x1FF) {
2950 		timer_res = 2;
2951 	} else {
2952 		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
2953 		return -EINVAL;
2954 	}
2955 	timeset = (u8)(coalesce >> timer_res);
2956 
2957 	rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
2958 	if (rc)
2959 		return rc;
2960 
2961 	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
2962 	if (rc)
2963 		goto out;
2964 
2965 	address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
2966 
2967 	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
2968 			      sizeof(struct xstorm_eth_queue_zone), timeset);
2969 	if (rc)
2970 		goto out;
2971 
2972 	p_hwfn->cdev->tx_coalesce_usecs = coalesce;
2973 out:
2974 	return rc;
2975 }
2976 
2977 /* Calculate final WFQ values for all vports and configure them.
2978  * After this configuration each vport will have
2979  * approx min rate =  min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
2980  */
2981 static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
2982 					     struct qed_ptt *p_ptt,
2983 					     u32 min_pf_rate)
2984 {
2985 	struct init_qm_vport_params *vport_params;
2986 	int i;
2987 
2988 	vport_params = p_hwfn->qm_info.qm_vport_params;
2989 
2990 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
2991 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
2992 
2993 		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
2994 						min_pf_rate;
2995 		qed_init_vport_wfq(p_hwfn, p_ptt,
2996 				   vport_params[i].first_tx_pq_id,
2997 				   vport_params[i].vport_wfq);
2998 	}
2999 }
3000 
3001 static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
3005 	int i;
3006 
3007 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
3008 		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
3009 }
3010 
3011 static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
3012 					   struct qed_ptt *p_ptt,
3013 					   u32 min_pf_rate)
3014 {
3015 	struct init_qm_vport_params *vport_params;
3016 	int i;
3017 
3018 	vport_params = p_hwfn->qm_info.qm_vport_params;
3019 
3020 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
3021 		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
3022 		qed_init_vport_wfq(p_hwfn, p_ptt,
3023 				   vport_params[i].first_tx_pq_id,
3024 				   vport_params[i].vport_wfq);
3025 	}
3026 }
3027 
3028 /* This function performs several validations for WFQ
 * configuration and required min rate for a given vport:
3030  * 1. req_rate must be greater than one percent of min_pf_rate.
3031  * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
3032  *    rates to get less than one percent of min_pf_rate.
3033  * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
3034  */
3035 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
3036 			      u16 vport_id, u32 req_rate, u32 min_pf_rate)
3037 {
3038 	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
3039 	int non_requested_count = 0, req_count = 0, i, num_vports;
3040 
3041 	num_vports = p_hwfn->qm_info.num_vports;
3042 
3043 	/* Accounting for the vports which are configured for WFQ explicitly */
3044 	for (i = 0; i < num_vports; i++) {
3045 		u32 tmp_speed;
3046 
3047 		if ((i != vport_id) &&
3048 		    p_hwfn->qm_info.wfq_data[i].configured) {
3049 			req_count++;
3050 			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
3051 			total_req_min_rate += tmp_speed;
3052 		}
3053 	}
3054 
3055 	/* Include current vport data as well */
3056 	req_count++;
3057 	total_req_min_rate += req_rate;
3058 	non_requested_count = num_vports - req_count;
3059 
3060 	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
3061 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3062 			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
3063 			   vport_id, req_rate, min_pf_rate);
3064 		return -EINVAL;
3065 	}
3066 
3067 	if (num_vports > QED_WFQ_UNIT) {
3068 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3069 			   "Number of vports is greater than %d\n",
3070 			   QED_WFQ_UNIT);
3071 		return -EINVAL;
3072 	}
3073 
3074 	if (total_req_min_rate > min_pf_rate) {
3075 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3076 			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
3077 			   total_req_min_rate, min_pf_rate);
3078 		return -EINVAL;
3079 	}
3080 
3081 	total_left_rate	= min_pf_rate - total_req_min_rate;
3082 
3083 	left_rate_per_vp = total_left_rate / non_requested_count;
3084 	if (left_rate_per_vp <  min_pf_rate / QED_WFQ_UNIT) {
3085 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3086 			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
3087 			   left_rate_per_vp, min_pf_rate);
3088 		return -EINVAL;
3089 	}
3090 
3091 	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
3092 	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
3093 
3094 	for (i = 0; i < num_vports; i++) {
3095 		if (p_hwfn->qm_info.wfq_data[i].configured)
3096 			continue;
3097 
3098 		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
3099 	}
3100 
3101 	return 0;
3102 }
3103 
3104 static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
3105 				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
3106 {
3107 	struct qed_mcp_link_state *p_link;
3108 	int rc = 0;
3109 
3110 	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
3111 
3112 	if (!p_link->min_pf_rate) {
3113 		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
3114 		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
3115 		return rc;
3116 	}
3117 
3118 	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
3119 
3120 	if (!rc)
3121 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
3122 						 p_link->min_pf_rate);
3123 	else
3124 		DP_NOTICE(p_hwfn,
3125 			  "Validation failed while configuring min rate\n");
3126 
3127 	return rc;
3128 }
3129 
3130 static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
3131 						 struct qed_ptt *p_ptt,
3132 						 u32 min_pf_rate)
3133 {
3134 	bool use_wfq = false;
3135 	int rc = 0;
3136 	u16 i;
3137 
3138 	/* Validate all pre configured vports for wfq */
3139 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
3140 		u32 rate;
3141 
3142 		if (!p_hwfn->qm_info.wfq_data[i].configured)
3143 			continue;
3144 
3145 		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
3146 		use_wfq = true;
3147 
3148 		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
3149 		if (rc) {
3150 			DP_NOTICE(p_hwfn,
3151 				  "WFQ validation failed while configuring min rate\n");
3152 			break;
3153 		}
3154 	}
3155 
3156 	if (!rc && use_wfq)
3157 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
3158 	else
3159 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
3160 
3161 	return rc;
3162 }
3163 
3164 /* Main API for qed clients to configure vport min rate.
3165  * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
3166  * rate - Speed in Mbps needs to be assigned to a given vport.
3167  */
3168 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
3169 {
3170 	int i, rc = -EINVAL;
3171 
	/* Currently not supported; might change in the future */
3173 	if (cdev->num_hwfns > 1) {
3174 		DP_NOTICE(cdev,
3175 			  "WFQ configuration is not supported for this device\n");
3176 		return rc;
3177 	}
3178 
3179 	for_each_hwfn(cdev, i) {
3180 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3181 		struct qed_ptt *p_ptt;
3182 
3183 		p_ptt = qed_ptt_acquire(p_hwfn);
3184 		if (!p_ptt)
3185 			return -EBUSY;
3186 
3187 		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
3188 
3189 		if (rc) {
3190 			qed_ptt_release(p_hwfn, p_ptt);
3191 			return rc;
3192 		}
3193 
3194 		qed_ptt_release(p_hwfn, p_ptt);
3195 	}
3196 
3197 	return rc;
3198 }
3199 
3200 /* API to configure WFQ from mcp link change */
3201 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
3202 					 struct qed_ptt *p_ptt, u32 min_pf_rate)
3203 {
3204 	int i;
3205 
3206 	if (cdev->num_hwfns > 1) {
3207 		DP_VERBOSE(cdev,
3208 			   NETIF_MSG_LINK,
3209 			   "WFQ configuration is not supported for this device\n");
3210 		return;
3211 	}
3212 
3213 	for_each_hwfn(cdev, i) {
3214 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3215 
3216 		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
3217 						      min_pf_rate);
3218 	}
3219 }
3220 
3221 int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
3222 				     struct qed_ptt *p_ptt,
3223 				     struct qed_mcp_link_state *p_link,
3224 				     u8 max_bw)
3225 {
3226 	int rc = 0;
3227 
3228 	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
3229 
3230 	if (!p_link->line_speed && (max_bw != 100))
3231 		return rc;
3232 
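	/* The rate limit scales with the link, e.g., max_bw = 40 on a
	 * 25000 Mbps link yields a 10000 Mbps PF rate limit.
	 */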
3233 	p_link->speed = (p_link->line_speed * max_bw) / 100;
3234 	p_hwfn->qm_info.pf_rl = p_link->speed;
3235 
3236 	/* Since the limiter also affects Tx-switched traffic, we don't want it
3237 	 * to limit such traffic in case there's no actual limit.
3238 	 * In that case, set limit to imaginary high boundary.
3239 	 */
3240 	if (max_bw == 100)
3241 		p_hwfn->qm_info.pf_rl = 100000;
3242 
3243 	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
3244 			    p_hwfn->qm_info.pf_rl);
3245 
3246 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3247 		   "Configured MAX bandwidth to be %08x Mb/sec\n",
3248 		   p_link->speed);
3249 
3250 	return rc;
3251 }
3252 
3253 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
3254 int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
3255 {
3256 	int i, rc = -EINVAL;
3257 
3258 	if (max_bw < 1 || max_bw > 100) {
3259 		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
3260 		return rc;
3261 	}
3262 
3263 	for_each_hwfn(cdev, i) {
3264 		struct qed_hwfn	*p_hwfn = &cdev->hwfns[i];
3265 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
3266 		struct qed_mcp_link_state *p_link;
3267 		struct qed_ptt *p_ptt;
3268 
3269 		p_link = &p_lead->mcp_info->link_output;
3270 
3271 		p_ptt = qed_ptt_acquire(p_hwfn);
3272 		if (!p_ptt)
3273 			return -EBUSY;
3274 
3275 		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
3276 						      p_link, max_bw);
3277 
3278 		qed_ptt_release(p_hwfn, p_ptt);
3279 
3280 		if (rc)
3281 			break;
3282 	}
3283 
3284 	return rc;
3285 }
3286 
3287 int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
3288 				     struct qed_ptt *p_ptt,
3289 				     struct qed_mcp_link_state *p_link,
3290 				     u8 min_bw)
3291 {
3292 	int rc = 0;
3293 
3294 	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
3295 	p_hwfn->qm_info.pf_wfq = min_bw;
3296 
3297 	if (!p_link->line_speed)
3298 		return rc;
3299 
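	/* The guaranteed share scales with the link, e.g., min_bw = 20 on a
	 * 10000 Mbps link reserves 2000 Mbps for this PF.
	 */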
3300 	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
3301 
3302 	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
3303 
3304 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3305 		   "Configured MIN bandwidth to be %d Mb/sec\n",
3306 		   p_link->min_pf_rate);
3307 
3308 	return rc;
3309 }
3310 
3311 /* Main API to configure PF min bandwidth where bw range is [1-100] */
3312 int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
3313 {
3314 	int i, rc = -EINVAL;
3315 
3316 	if (min_bw < 1 || min_bw > 100) {
3317 		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
3318 		return rc;
3319 	}
3320 
3321 	for_each_hwfn(cdev, i) {
3322 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
3323 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
3324 		struct qed_mcp_link_state *p_link;
3325 		struct qed_ptt *p_ptt;
3326 
3327 		p_link = &p_lead->mcp_info->link_output;
3328 
3329 		p_ptt = qed_ptt_acquire(p_hwfn);
3330 		if (!p_ptt)
3331 			return -EBUSY;
3332 
3333 		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
3334 						      p_link, min_bw);
3335 		if (rc) {
3336 			qed_ptt_release(p_hwfn, p_ptt);
3337 			return rc;
3338 		}
3339 
3340 		if (p_link->min_pf_rate) {
3341 			u32 min_rate = p_link->min_pf_rate;
3342 
3343 			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
3344 								   p_ptt,
3345 								   min_rate);
3346 		}
3347 
3348 		qed_ptt_release(p_hwfn, p_ptt);
3349 	}
3350 
3351 	return rc;
3352 }
3353 
3354 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3355 {
3356 	struct qed_mcp_link_state *p_link;
3357 
3358 	p_link = &p_hwfn->mcp_info->link_output;
3359 
3360 	if (p_link->min_pf_rate)
3361 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
3362 					       p_link->min_pf_rate);
3363 
3364 	memset(p_hwfn->qm_info.wfq_data, 0,
3365 	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
3366 }
3367