1fe56b9e6SYuval Mintz /* QLogic qed NIC Driver
2fe56b9e6SYuval Mintz  * Copyright (c) 2015 QLogic Corporation
3fe56b9e6SYuval Mintz  *
4fe56b9e6SYuval Mintz  * This software is available under the terms of the GNU General Public License
5fe56b9e6SYuval Mintz  * (GPL) Version 2, available from the file COPYING in the main directory of
6fe56b9e6SYuval Mintz  * this source tree.
7fe56b9e6SYuval Mintz  */
8fe56b9e6SYuval Mintz 
9fe56b9e6SYuval Mintz #include <linux/types.h>
10fe56b9e6SYuval Mintz #include <asm/byteorder.h>
11fe56b9e6SYuval Mintz #include <linux/io.h>
12fe56b9e6SYuval Mintz #include <linux/delay.h>
13fe56b9e6SYuval Mintz #include <linux/dma-mapping.h>
14fe56b9e6SYuval Mintz #include <linux/errno.h>
15fe56b9e6SYuval Mintz #include <linux/kernel.h>
16fe56b9e6SYuval Mintz #include <linux/mutex.h>
17fe56b9e6SYuval Mintz #include <linux/pci.h>
18fe56b9e6SYuval Mintz #include <linux/slab.h>
19fe56b9e6SYuval Mintz #include <linux/string.h>
20fe56b9e6SYuval Mintz #include <linux/etherdevice.h>
21fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h>
22fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h>
23fe56b9e6SYuval Mintz #include "qed.h"
24fe56b9e6SYuval Mintz #include "qed_cxt.h"
2539651abdSSudarsana Reddy Kalluru #include "qed_dcbx.h"
26fe56b9e6SYuval Mintz #include "qed_dev_api.h"
27fe56b9e6SYuval Mintz #include "qed_hsi.h"
28fe56b9e6SYuval Mintz #include "qed_hw.h"
29fe56b9e6SYuval Mintz #include "qed_init_ops.h"
30fe56b9e6SYuval Mintz #include "qed_int.h"
31fe56b9e6SYuval Mintz #include "qed_mcp.h"
32fe56b9e6SYuval Mintz #include "qed_reg_addr.h"
33fe56b9e6SYuval Mintz #include "qed_sp.h"
3432a47e72SYuval Mintz #include "qed_sriov.h"
350b55e27dSYuval Mintz #include "qed_vf.h"
36fe56b9e6SYuval Mintz 
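/* Serializes the QM stop/start commands issued while reconfiguring the QM PF
 * on the fly (see qed_qm_reconf()); initialized lazily on the first
 * qed_hw_init() invocation.
 */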
3739651abdSSudarsana Reddy Kalluru static spinlock_t qm_lock;
3839651abdSSudarsana Reddy Kalluru static bool qm_lock_init;
3939651abdSSudarsana Reddy Kalluru 
40fe56b9e6SYuval Mintz /* API common to all protocols */
41c2035eeaSRam Amrani enum BAR_ID {
42c2035eeaSRam Amrani 	BAR_ID_0,       /* used for GRC */
43c2035eeaSRam Amrani 	BAR_ID_1        /* Used for doorbells */
44c2035eeaSRam Amrani };
45c2035eeaSRam Amrani 
46c2035eeaSRam Amrani static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
47c2035eeaSRam Amrani 			   enum BAR_ID		bar_id)
48c2035eeaSRam Amrani {
49c2035eeaSRam Amrani 	u32 bar_reg = (bar_id == BAR_ID_0 ?
50c2035eeaSRam Amrani 		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
511408cc1fSYuval Mintz 	u32 val;
52c2035eeaSRam Amrani 
531408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
541408cc1fSYuval Mintz 		return 1 << 17;
551408cc1fSYuval Mintz 
561408cc1fSYuval Mintz 	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
57c2035eeaSRam Amrani 	if (val)
58c2035eeaSRam Amrani 		return 1 << (val + 15);
59c2035eeaSRam Amrani 
60c2035eeaSRam Amrani 	/* Old MFW initialized the above register only conditionally */
61c2035eeaSRam Amrani 	if (p_hwfn->cdev->num_hwfns > 1) {
62c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
63c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
64c2035eeaSRam Amrani 		return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
65c2035eeaSRam Amrani 	} else {
66c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
67c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
68c2035eeaSRam Amrani 		return 512 * 1024;
69c2035eeaSRam Amrani 	}
70c2035eeaSRam Amrani }
71c2035eeaSRam Amrani 
72fe56b9e6SYuval Mintz void qed_init_dp(struct qed_dev *cdev,
73fe56b9e6SYuval Mintz 		 u32 dp_module, u8 dp_level)
74fe56b9e6SYuval Mintz {
75fe56b9e6SYuval Mintz 	u32 i;
76fe56b9e6SYuval Mintz 
77fe56b9e6SYuval Mintz 	cdev->dp_level = dp_level;
78fe56b9e6SYuval Mintz 	cdev->dp_module = dp_module;
79fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
80fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
81fe56b9e6SYuval Mintz 
82fe56b9e6SYuval Mintz 		p_hwfn->dp_level = dp_level;
83fe56b9e6SYuval Mintz 		p_hwfn->dp_module = dp_module;
84fe56b9e6SYuval Mintz 	}
85fe56b9e6SYuval Mintz }
86fe56b9e6SYuval Mintz 
87fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev)
88fe56b9e6SYuval Mintz {
89fe56b9e6SYuval Mintz 	u8 i;
90fe56b9e6SYuval Mintz 
91fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
92fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
93fe56b9e6SYuval Mintz 
94fe56b9e6SYuval Mintz 		p_hwfn->cdev = cdev;
95fe56b9e6SYuval Mintz 		p_hwfn->my_id = i;
96fe56b9e6SYuval Mintz 		p_hwfn->b_active = false;
97fe56b9e6SYuval Mintz 
98fe56b9e6SYuval Mintz 		mutex_init(&p_hwfn->dmae_info.mutex);
99fe56b9e6SYuval Mintz 	}
100fe56b9e6SYuval Mintz 
101fe56b9e6SYuval Mintz 	/* hwfn 0 is always active */
102fe56b9e6SYuval Mintz 	cdev->hwfns[0].b_active = true;
103fe56b9e6SYuval Mintz 
104fe56b9e6SYuval Mintz 	/* set the default cache alignment to 128 */
105fe56b9e6SYuval Mintz 	cdev->cache_shift = 7;
106fe56b9e6SYuval Mintz }
107fe56b9e6SYuval Mintz 
108fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
109fe56b9e6SYuval Mintz {
110fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
111fe56b9e6SYuval Mintz 
112fe56b9e6SYuval Mintz 	kfree(qm_info->qm_pq_params);
113fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = NULL;
114fe56b9e6SYuval Mintz 	kfree(qm_info->qm_vport_params);
115fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = NULL;
116fe56b9e6SYuval Mintz 	kfree(qm_info->qm_port_params);
117fe56b9e6SYuval Mintz 	qm_info->qm_port_params = NULL;
118bcd197c8SManish Chopra 	kfree(qm_info->wfq_data);
119bcd197c8SManish Chopra 	qm_info->wfq_data = NULL;
120fe56b9e6SYuval Mintz }
121fe56b9e6SYuval Mintz 
122fe56b9e6SYuval Mintz void qed_resc_free(struct qed_dev *cdev)
123fe56b9e6SYuval Mintz {
124fe56b9e6SYuval Mintz 	int i;
125fe56b9e6SYuval Mintz 
1261408cc1fSYuval Mintz 	if (IS_VF(cdev))
1271408cc1fSYuval Mintz 		return;
1281408cc1fSYuval Mintz 
129fe56b9e6SYuval Mintz 	kfree(cdev->fw_data);
130fe56b9e6SYuval Mintz 	cdev->fw_data = NULL;
131fe56b9e6SYuval Mintz 
132fe56b9e6SYuval Mintz 	kfree(cdev->reset_stats);
133fe56b9e6SYuval Mintz 
134fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
135fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
136fe56b9e6SYuval Mintz 
13725c089d7SYuval Mintz 		kfree(p_hwfn->p_tx_cids);
13825c089d7SYuval Mintz 		p_hwfn->p_tx_cids = NULL;
13925c089d7SYuval Mintz 		kfree(p_hwfn->p_rx_cids);
14025c089d7SYuval Mintz 		p_hwfn->p_rx_cids = NULL;
14125c089d7SYuval Mintz 	}
14225c089d7SYuval Mintz 
14325c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
14425c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
14525c089d7SYuval Mintz 
146fe56b9e6SYuval Mintz 		qed_cxt_mngr_free(p_hwfn);
147fe56b9e6SYuval Mintz 		qed_qm_info_free(p_hwfn);
148fe56b9e6SYuval Mintz 		qed_spq_free(p_hwfn);
149fe56b9e6SYuval Mintz 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
150fe56b9e6SYuval Mintz 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
151fe56b9e6SYuval Mintz 		qed_int_free(p_hwfn);
15232a47e72SYuval Mintz 		qed_iov_free(p_hwfn);
153fe56b9e6SYuval Mintz 		qed_dmae_info_free(p_hwfn);
15439651abdSSudarsana Reddy Kalluru 		qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
155fe56b9e6SYuval Mintz 	}
156fe56b9e6SYuval Mintz }
157fe56b9e6SYuval Mintz 
15879529291SSudarsana Reddy Kalluru static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
159fe56b9e6SYuval Mintz {
1601408cc1fSYuval Mintz 	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
161fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
162fe56b9e6SYuval Mintz 	struct init_qm_port_params *p_qm_port;
163fe56b9e6SYuval Mintz 	u16 num_pqs, multi_cos_tcs = 1;
164cc3d5eb0SYuval Mintz 	u8 pf_wfq = qm_info->pf_wfq;
165cc3d5eb0SYuval Mintz 	u32 pf_rl = qm_info->pf_rl;
1661408cc1fSYuval Mintz 	u16 num_vfs = 0;
167fe56b9e6SYuval Mintz 
1681408cc1fSYuval Mintz #ifdef CONFIG_QED_SRIOV
1691408cc1fSYuval Mintz 	if (p_hwfn->cdev->p_iov_info)
1701408cc1fSYuval Mintz 		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
1711408cc1fSYuval Mintz #endif
172fe56b9e6SYuval Mintz 	memset(qm_info, 0, sizeof(*qm_info));
173fe56b9e6SYuval Mintz 
1741408cc1fSYuval Mintz 	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
175fe56b9e6SYuval Mintz 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
176fe56b9e6SYuval Mintz 
177fe56b9e6SYuval Mintz 	/* Sanity check that the setup requires a legal number of resources */
178fe56b9e6SYuval Mintz 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
179fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
180fe56b9e6SYuval Mintz 		       "Need too many Physical queues - 0x%04x when only 0x%04x are available\n",
181fe56b9e6SYuval Mintz 		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
182fe56b9e6SYuval Mintz 		return -EINVAL;
183fe56b9e6SYuval Mintz 	}
184fe56b9e6SYuval Mintz 
185fe56b9e6SYuval Mintz 	/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
186fe56b9e6SYuval Mintz 	 */
18779529291SSudarsana Reddy Kalluru 	qm_info->qm_pq_params = kcalloc(num_pqs,
18879529291SSudarsana Reddy Kalluru 					sizeof(struct init_qm_pq_params),
18979529291SSudarsana Reddy Kalluru 					b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
190fe56b9e6SYuval Mintz 	if (!qm_info->qm_pq_params)
191fe56b9e6SYuval Mintz 		goto alloc_err;
192fe56b9e6SYuval Mintz 
19379529291SSudarsana Reddy Kalluru 	qm_info->qm_vport_params = kcalloc(num_vports,
19479529291SSudarsana Reddy Kalluru 					   sizeof(struct init_qm_vport_params),
19579529291SSudarsana Reddy Kalluru 					   b_sleepable ? GFP_KERNEL
19679529291SSudarsana Reddy Kalluru 						       : GFP_ATOMIC);
197fe56b9e6SYuval Mintz 	if (!qm_info->qm_vport_params)
198fe56b9e6SYuval Mintz 		goto alloc_err;
199fe56b9e6SYuval Mintz 
20079529291SSudarsana Reddy Kalluru 	qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
20179529291SSudarsana Reddy Kalluru 					  sizeof(struct init_qm_port_params),
20279529291SSudarsana Reddy Kalluru 					  b_sleepable ? GFP_KERNEL
20379529291SSudarsana Reddy Kalluru 						      : GFP_ATOMIC);
204fe56b9e6SYuval Mintz 	if (!qm_info->qm_port_params)
205fe56b9e6SYuval Mintz 		goto alloc_err;
206fe56b9e6SYuval Mintz 
20779529291SSudarsana Reddy Kalluru 	qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
20879529291SSudarsana Reddy Kalluru 				    b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
209bcd197c8SManish Chopra 	if (!qm_info->wfq_data)
210bcd197c8SManish Chopra 		goto alloc_err;
211bcd197c8SManish Chopra 
212fe56b9e6SYuval Mintz 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
213fe56b9e6SYuval Mintz 
214fe56b9e6SYuval Mintz 	/* First init per-TC PQs */
21539651abdSSudarsana Reddy Kalluru 	for (i = 0; i < multi_cos_tcs; i++) {
2161408cc1fSYuval Mintz 		struct init_qm_pq_params *params =
21739651abdSSudarsana Reddy Kalluru 		    &qm_info->qm_pq_params[curr_queue++];
218fe56b9e6SYuval Mintz 
21939651abdSSudarsana Reddy Kalluru 		if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
220fe56b9e6SYuval Mintz 			params->vport_id = vport_id;
221fe56b9e6SYuval Mintz 			params->tc_id = p_hwfn->hw_info.non_offload_tc;
222fe56b9e6SYuval Mintz 			params->wrr_group = 1;
22339651abdSSudarsana Reddy Kalluru 		} else {
22439651abdSSudarsana Reddy Kalluru 			params->vport_id = vport_id;
22539651abdSSudarsana Reddy Kalluru 			params->tc_id = p_hwfn->hw_info.offload_tc;
22639651abdSSudarsana Reddy Kalluru 			params->wrr_group = 1;
22739651abdSSudarsana Reddy Kalluru 		}
228fe56b9e6SYuval Mintz 	}
229fe56b9e6SYuval Mintz 
230fe56b9e6SYuval Mintz 	/* Then init pure-LB PQ */
2311408cc1fSYuval Mintz 	qm_info->pure_lb_pq = curr_queue;
2321408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].vport_id =
2331408cc1fSYuval Mintz 	    (u8) RESC_START(p_hwfn, QED_VPORT);
2341408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
2351408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
2361408cc1fSYuval Mintz 	curr_queue++;
237fe56b9e6SYuval Mintz 
238fe56b9e6SYuval Mintz 	qm_info->offload_pq = 0;
2391408cc1fSYuval Mintz 	/* Then init per-VF PQs */
2401408cc1fSYuval Mintz 	vf_offset = curr_queue;
2411408cc1fSYuval Mintz 	for (i = 0; i < num_vfs; i++) {
2421408cc1fSYuval Mintz 		/* First vport is used by the PF */
2431408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
2441408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].tc_id =
2451408cc1fSYuval Mintz 		    p_hwfn->hw_info.non_offload_tc;
2461408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
2471408cc1fSYuval Mintz 		curr_queue++;
2481408cc1fSYuval Mintz 	}
2491408cc1fSYuval Mintz 
2501408cc1fSYuval Mintz 	qm_info->vf_queues_offset = vf_offset;
251fe56b9e6SYuval Mintz 	qm_info->num_pqs = num_pqs;
252fe56b9e6SYuval Mintz 	qm_info->num_vports = num_vports;
253fe56b9e6SYuval Mintz 
254fe56b9e6SYuval Mintz 	/* Initialize qm port parameters */
255fe56b9e6SYuval Mintz 	num_ports = p_hwfn->cdev->num_ports_in_engines;
256fe56b9e6SYuval Mintz 	for (i = 0; i < num_ports; i++) {
257fe56b9e6SYuval Mintz 		p_qm_port = &qm_info->qm_port_params[i];
258fe56b9e6SYuval Mintz 		p_qm_port->active = 1;
259fe56b9e6SYuval Mintz 		p_qm_port->num_active_phys_tcs = 4;
260fe56b9e6SYuval Mintz 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
261fe56b9e6SYuval Mintz 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
262fe56b9e6SYuval Mintz 	}
263fe56b9e6SYuval Mintz 
264fe56b9e6SYuval Mintz 	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
265fe56b9e6SYuval Mintz 
266fe56b9e6SYuval Mintz 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
267fe56b9e6SYuval Mintz 
2681408cc1fSYuval Mintz 	qm_info->num_vf_pqs = num_vfs;
269fe56b9e6SYuval Mintz 	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
270fe56b9e6SYuval Mintz 
271a64b02d5SManish Chopra 	for (i = 0; i < qm_info->num_vports; i++)
272a64b02d5SManish Chopra 		qm_info->qm_vport_params[i].vport_wfq = 1;
273a64b02d5SManish Chopra 
274fe56b9e6SYuval Mintz 	qm_info->vport_rl_en = 1;
275a64b02d5SManish Chopra 	qm_info->vport_wfq_en = 1;
276cc3d5eb0SYuval Mintz 	qm_info->pf_rl = pf_rl;
277cc3d5eb0SYuval Mintz 	qm_info->pf_wfq = pf_wfq;
278fe56b9e6SYuval Mintz 
279fe56b9e6SYuval Mintz 	return 0;
280fe56b9e6SYuval Mintz 
281fe56b9e6SYuval Mintz alloc_err:
282fe56b9e6SYuval Mintz 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
283bcd197c8SManish Chopra 	qed_qm_info_free(p_hwfn);
284fe56b9e6SYuval Mintz 	return -ENOMEM;
285fe56b9e6SYuval Mintz }
286fe56b9e6SYuval Mintz 
28739651abdSSudarsana Reddy Kalluru /* This function reconfigures the QM PF on the fly.
28839651abdSSudarsana Reddy Kalluru  * For this purpose we:
28939651abdSSudarsana Reddy Kalluru  * 1. reconfigure the QM database
29039651abdSSudarsana Reddy Kalluru  * 2. set new values to the runtime array
29139651abdSSudarsana Reddy Kalluru  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
29239651abdSSudarsana Reddy Kalluru  * 4. activate init tool in QM_PF stage
29339651abdSSudarsana Reddy Kalluru  * 5. send an sdm_qm_cmd through rbc interface to release the QM
29439651abdSSudarsana Reddy Kalluru  */
29539651abdSSudarsana Reddy Kalluru int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
29639651abdSSudarsana Reddy Kalluru {
29739651abdSSudarsana Reddy Kalluru 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
29839651abdSSudarsana Reddy Kalluru 	bool b_rc;
29939651abdSSudarsana Reddy Kalluru 	int rc;
30039651abdSSudarsana Reddy Kalluru 
30139651abdSSudarsana Reddy Kalluru 	/* qm_info is allocated in qed_init_qm_info() which is already called
30239651abdSSudarsana Reddy Kalluru 	 * from qed_resc_alloc() or previous call of qed_qm_reconf().
30339651abdSSudarsana Reddy Kalluru 	 * The allocated size may change each init, so we free it before next
30439651abdSSudarsana Reddy Kalluru 	 * allocation.
30539651abdSSudarsana Reddy Kalluru 	 */
30639651abdSSudarsana Reddy Kalluru 	qed_qm_info_free(p_hwfn);
30739651abdSSudarsana Reddy Kalluru 
30839651abdSSudarsana Reddy Kalluru 	/* initialize qed's qm data structure */
30979529291SSudarsana Reddy Kalluru 	rc = qed_init_qm_info(p_hwfn, false);
31039651abdSSudarsana Reddy Kalluru 	if (rc)
31139651abdSSudarsana Reddy Kalluru 		return rc;
31239651abdSSudarsana Reddy Kalluru 
31339651abdSSudarsana Reddy Kalluru 	/* stop PF's qm queues */
31439651abdSSudarsana Reddy Kalluru 	spin_lock_bh(&qm_lock);
31539651abdSSudarsana Reddy Kalluru 	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
31639651abdSSudarsana Reddy Kalluru 				    qm_info->start_pq, qm_info->num_pqs);
31739651abdSSudarsana Reddy Kalluru 	spin_unlock_bh(&qm_lock);
31839651abdSSudarsana Reddy Kalluru 	if (!b_rc)
31939651abdSSudarsana Reddy Kalluru 		return -EINVAL;
32039651abdSSudarsana Reddy Kalluru 
32139651abdSSudarsana Reddy Kalluru 	/* clear the QM_PF runtime phase leftovers from previous init */
32239651abdSSudarsana Reddy Kalluru 	qed_init_clear_rt_data(p_hwfn);
32339651abdSSudarsana Reddy Kalluru 
32439651abdSSudarsana Reddy Kalluru 	/* prepare QM portion of runtime array */
32539651abdSSudarsana Reddy Kalluru 	qed_qm_init_pf(p_hwfn);
32639651abdSSudarsana Reddy Kalluru 
32739651abdSSudarsana Reddy Kalluru 	/* activate init tool on runtime array */
32839651abdSSudarsana Reddy Kalluru 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
32939651abdSSudarsana Reddy Kalluru 			  p_hwfn->hw_info.hw_mode);
33039651abdSSudarsana Reddy Kalluru 	if (rc)
33139651abdSSudarsana Reddy Kalluru 		return rc;
33239651abdSSudarsana Reddy Kalluru 
33339651abdSSudarsana Reddy Kalluru 	/* start PF's qm queues */
33439651abdSSudarsana Reddy Kalluru 	spin_lock_bh(&qm_lock);
33539651abdSSudarsana Reddy Kalluru 	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
33639651abdSSudarsana Reddy Kalluru 				    qm_info->start_pq, qm_info->num_pqs);
33739651abdSSudarsana Reddy Kalluru 	spin_unlock_bh(&qm_lock);
33839651abdSSudarsana Reddy Kalluru 	if (!b_rc)
33939651abdSSudarsana Reddy Kalluru 		return -EINVAL;
34039651abdSSudarsana Reddy Kalluru 
34139651abdSSudarsana Reddy Kalluru 	return 0;
34239651abdSSudarsana Reddy Kalluru }
34339651abdSSudarsana Reddy Kalluru 
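/* Allocate all per-hwfn software resources: queue->CID maps, context manager,
 * QM info, ILT shadow/T2 tables, SPQ, interrupt and IOV data, EQ, ConsQ, DMAE
 * and DCBX structures. On any failure everything is torn down via
 * qed_resc_free().
 */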
344fe56b9e6SYuval Mintz int qed_resc_alloc(struct qed_dev *cdev)
345fe56b9e6SYuval Mintz {
346fe56b9e6SYuval Mintz 	struct qed_consq *p_consq;
347fe56b9e6SYuval Mintz 	struct qed_eq *p_eq;
348fe56b9e6SYuval Mintz 	int i, rc = 0;
349fe56b9e6SYuval Mintz 
3501408cc1fSYuval Mintz 	if (IS_VF(cdev))
3511408cc1fSYuval Mintz 		return rc;
3521408cc1fSYuval Mintz 
353fe56b9e6SYuval Mintz 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
354fe56b9e6SYuval Mintz 	if (!cdev->fw_data)
355fe56b9e6SYuval Mintz 		return -ENOMEM;
356fe56b9e6SYuval Mintz 
35725c089d7SYuval Mintz 	/* Allocate Memory for the Queue->CID mapping */
35825c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
35925c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
36025c089d7SYuval Mintz 		int tx_size = sizeof(struct qed_hw_cid_data) *
36125c089d7SYuval Mintz 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
36225c089d7SYuval Mintz 		int rx_size = sizeof(struct qed_hw_cid_data) *
36325c089d7SYuval Mintz 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
36425c089d7SYuval Mintz 
36525c089d7SYuval Mintz 		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
36625c089d7SYuval Mintz 		if (!p_hwfn->p_tx_cids) {
36725c089d7SYuval Mintz 			DP_NOTICE(p_hwfn,
36825c089d7SYuval Mintz 				  "Failed to allocate memory for Tx Cids\n");
3699b15acbfSDan Carpenter 			rc = -ENOMEM;
37025c089d7SYuval Mintz 			goto alloc_err;
37125c089d7SYuval Mintz 		}
37225c089d7SYuval Mintz 
37325c089d7SYuval Mintz 		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
37425c089d7SYuval Mintz 		if (!p_hwfn->p_rx_cids) {
37525c089d7SYuval Mintz 			DP_NOTICE(p_hwfn,
37625c089d7SYuval Mintz 				  "Failed to allocate memory for Rx Cids\n");
3779b15acbfSDan Carpenter 			rc = -ENOMEM;
37825c089d7SYuval Mintz 			goto alloc_err;
37925c089d7SYuval Mintz 		}
38025c089d7SYuval Mintz 	}
38125c089d7SYuval Mintz 
382fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
383fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
384fe56b9e6SYuval Mintz 
385fe56b9e6SYuval Mintz 		/* First allocate the context manager structure */
386fe56b9e6SYuval Mintz 		rc = qed_cxt_mngr_alloc(p_hwfn);
387fe56b9e6SYuval Mintz 		if (rc)
388fe56b9e6SYuval Mintz 			goto alloc_err;
389fe56b9e6SYuval Mintz 
390fe56b9e6SYuval Mintz 		/* Set the HW cid/tid numbers (in the context manager)
391fe56b9e6SYuval Mintz 		 * Must be done prior to any further computations.
392fe56b9e6SYuval Mintz 		 */
393fe56b9e6SYuval Mintz 		rc = qed_cxt_set_pf_params(p_hwfn);
394fe56b9e6SYuval Mintz 		if (rc)
395fe56b9e6SYuval Mintz 			goto alloc_err;
396fe56b9e6SYuval Mintz 
397fe56b9e6SYuval Mintz 		/* Prepare and process QM requirements */
39879529291SSudarsana Reddy Kalluru 		rc = qed_init_qm_info(p_hwfn, true);
399fe56b9e6SYuval Mintz 		if (rc)
400fe56b9e6SYuval Mintz 			goto alloc_err;
401fe56b9e6SYuval Mintz 
402fe56b9e6SYuval Mintz 		/* Compute the ILT client partition */
403fe56b9e6SYuval Mintz 		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
404fe56b9e6SYuval Mintz 		if (rc)
405fe56b9e6SYuval Mintz 			goto alloc_err;
406fe56b9e6SYuval Mintz 
407fe56b9e6SYuval Mintz 		/* CID map / ILT shadow table / T2
408fe56b9e6SYuval Mintz 		 * The table sizes are determined by the computations above
409fe56b9e6SYuval Mintz 		 */
410fe56b9e6SYuval Mintz 		rc = qed_cxt_tables_alloc(p_hwfn);
411fe56b9e6SYuval Mintz 		if (rc)
412fe56b9e6SYuval Mintz 			goto alloc_err;
413fe56b9e6SYuval Mintz 
414fe56b9e6SYuval Mintz 		/* SPQ, must follow ILT because it initializes SPQ context */
415fe56b9e6SYuval Mintz 		rc = qed_spq_alloc(p_hwfn);
416fe56b9e6SYuval Mintz 		if (rc)
417fe56b9e6SYuval Mintz 			goto alloc_err;
418fe56b9e6SYuval Mintz 
419fe56b9e6SYuval Mintz 		/* SP status block allocation */
420fe56b9e6SYuval Mintz 		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
421fe56b9e6SYuval Mintz 							 RESERVED_PTT_DPC);
422fe56b9e6SYuval Mintz 
423fe56b9e6SYuval Mintz 		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
424fe56b9e6SYuval Mintz 		if (rc)
425fe56b9e6SYuval Mintz 			goto alloc_err;
426fe56b9e6SYuval Mintz 
42732a47e72SYuval Mintz 		rc = qed_iov_alloc(p_hwfn);
42832a47e72SYuval Mintz 		if (rc)
42932a47e72SYuval Mintz 			goto alloc_err;
43032a47e72SYuval Mintz 
431fe56b9e6SYuval Mintz 		/* EQ */
432fe56b9e6SYuval Mintz 		p_eq = qed_eq_alloc(p_hwfn, 256);
4339b15acbfSDan Carpenter 		if (!p_eq) {
4349b15acbfSDan Carpenter 			rc = -ENOMEM;
435fe56b9e6SYuval Mintz 			goto alloc_err;
4369b15acbfSDan Carpenter 		}
437fe56b9e6SYuval Mintz 		p_hwfn->p_eq = p_eq;
438fe56b9e6SYuval Mintz 
439fe56b9e6SYuval Mintz 		p_consq = qed_consq_alloc(p_hwfn);
4409b15acbfSDan Carpenter 		if (!p_consq) {
4419b15acbfSDan Carpenter 			rc = -ENOMEM;
442fe56b9e6SYuval Mintz 			goto alloc_err;
4439b15acbfSDan Carpenter 		}
444fe56b9e6SYuval Mintz 		p_hwfn->p_consq = p_consq;
445fe56b9e6SYuval Mintz 
446fe56b9e6SYuval Mintz 		/* DMA info initialization */
447fe56b9e6SYuval Mintz 		rc = qed_dmae_info_alloc(p_hwfn);
448fe56b9e6SYuval Mintz 		if (rc) {
449fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
450fe56b9e6SYuval Mintz 				  "Failed to allocate memory for dmae_info structure\n");
451fe56b9e6SYuval Mintz 			goto alloc_err;
452fe56b9e6SYuval Mintz 		}
45339651abdSSudarsana Reddy Kalluru 
45439651abdSSudarsana Reddy Kalluru 		/* DCBX initialization */
45539651abdSSudarsana Reddy Kalluru 		rc = qed_dcbx_info_alloc(p_hwfn);
45639651abdSSudarsana Reddy Kalluru 		if (rc) {
45739651abdSSudarsana Reddy Kalluru 			DP_NOTICE(p_hwfn,
45839651abdSSudarsana Reddy Kalluru 				  "Failed to allocate memory for dcbx structure\n");
45939651abdSSudarsana Reddy Kalluru 			goto alloc_err;
46039651abdSSudarsana Reddy Kalluru 		}
461fe56b9e6SYuval Mintz 	}
462fe56b9e6SYuval Mintz 
463fe56b9e6SYuval Mintz 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
464fe56b9e6SYuval Mintz 	if (!cdev->reset_stats) {
465fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
4669b15acbfSDan Carpenter 		rc = -ENOMEM;
467fe56b9e6SYuval Mintz 		goto alloc_err;
468fe56b9e6SYuval Mintz 	}
469fe56b9e6SYuval Mintz 
470fe56b9e6SYuval Mintz 	return 0;
471fe56b9e6SYuval Mintz 
472fe56b9e6SYuval Mintz alloc_err:
473fe56b9e6SYuval Mintz 	qed_resc_free(cdev);
474fe56b9e6SYuval Mintz 	return rc;
475fe56b9e6SYuval Mintz }
476fe56b9e6SYuval Mintz 
477fe56b9e6SYuval Mintz void qed_resc_setup(struct qed_dev *cdev)
478fe56b9e6SYuval Mintz {
479fe56b9e6SYuval Mintz 	int i;
480fe56b9e6SYuval Mintz 
4811408cc1fSYuval Mintz 	if (IS_VF(cdev))
4821408cc1fSYuval Mintz 		return;
4831408cc1fSYuval Mintz 
484fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
485fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
486fe56b9e6SYuval Mintz 
487fe56b9e6SYuval Mintz 		qed_cxt_mngr_setup(p_hwfn);
488fe56b9e6SYuval Mintz 		qed_spq_setup(p_hwfn);
489fe56b9e6SYuval Mintz 		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
490fe56b9e6SYuval Mintz 		qed_consq_setup(p_hwfn, p_hwfn->p_consq);
491fe56b9e6SYuval Mintz 
492fe56b9e6SYuval Mintz 		/* Read shadow of current MFW mailbox */
493fe56b9e6SYuval Mintz 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
494fe56b9e6SYuval Mintz 		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
495fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_cur,
496fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_length);
497fe56b9e6SYuval Mintz 
498fe56b9e6SYuval Mintz 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
49932a47e72SYuval Mintz 
50032a47e72SYuval Mintz 		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
501fe56b9e6SYuval Mintz 	}
502fe56b9e6SYuval Mintz }
503fe56b9e6SYuval Mintz 
504fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_CNT          (100)
505fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_TIME         (10)
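/* Request a firmware "final cleanup" for the given function by writing an SDM
 * op-gen command, then poll the per-function ack location in USTORM RAM until
 * the notification arrives or the retry limit expires.
 */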
506fe56b9e6SYuval Mintz int qed_final_cleanup(struct qed_hwfn *p_hwfn,
5070b55e27dSYuval Mintz 		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
508fe56b9e6SYuval Mintz {
509fe56b9e6SYuval Mintz 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
510fe56b9e6SYuval Mintz 	int rc = -EBUSY;
511fe56b9e6SYuval Mintz 
512fc48b7a6SYuval Mintz 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
513fc48b7a6SYuval Mintz 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
514fe56b9e6SYuval Mintz 
5150b55e27dSYuval Mintz 	if (is_vf)
5160b55e27dSYuval Mintz 		id += 0x10;
5170b55e27dSYuval Mintz 
518fc48b7a6SYuval Mintz 	command |= X_FINAL_CLEANUP_AGG_INT <<
519fc48b7a6SYuval Mintz 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
520fc48b7a6SYuval Mintz 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
521fc48b7a6SYuval Mintz 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
522fc48b7a6SYuval Mintz 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
523fe56b9e6SYuval Mintz 
524fe56b9e6SYuval Mintz 	/* Make sure notification is not set before initiating final cleanup */
525fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr)) {
526fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
527fe56b9e6SYuval Mintz 			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
529fe56b9e6SYuval Mintz 		REG_WR(p_hwfn, addr, 0);
530fe56b9e6SYuval Mintz 	}
531fe56b9e6SYuval Mintz 
532fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
533fe56b9e6SYuval Mintz 		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
534fe56b9e6SYuval Mintz 		   id, command);
535fe56b9e6SYuval Mintz 
536fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
537fe56b9e6SYuval Mintz 
538fe56b9e6SYuval Mintz 	/* Poll until completion */
539fe56b9e6SYuval Mintz 	while (!REG_RD(p_hwfn, addr) && count--)
540fe56b9e6SYuval Mintz 		msleep(FINAL_CLEANUP_POLL_TIME);
541fe56b9e6SYuval Mintz 
542fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr))
543fe56b9e6SYuval Mintz 		rc = 0;
544fe56b9e6SYuval Mintz 	else
545fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
546fe56b9e6SYuval Mintz 			  "Failed to receive FW final cleanup notification\n");
547fe56b9e6SYuval Mintz 
548fe56b9e6SYuval Mintz 	/* Cleanup afterwards */
549fe56b9e6SYuval Mintz 	REG_WR(p_hwfn, addr, 0);
550fe56b9e6SYuval Mintz 
551fe56b9e6SYuval Mintz 	return rc;
552fe56b9e6SYuval Mintz }
553fe56b9e6SYuval Mintz 
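/* Build the MODE_* bitmask consumed by the init tool from the number of ports
 * per engine, the multi-function mode and whether this is a two-hwfn (100G)
 * device.
 */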
554fe56b9e6SYuval Mintz static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
555fe56b9e6SYuval Mintz {
556fe56b9e6SYuval Mintz 	int hw_mode = 0;
557fe56b9e6SYuval Mintz 
55812e09c69SYuval Mintz 	hw_mode = (1 << MODE_BB_B0);
559fe56b9e6SYuval Mintz 
560fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->num_ports_in_engines) {
561fe56b9e6SYuval Mintz 	case 1:
562fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
563fe56b9e6SYuval Mintz 		break;
564fe56b9e6SYuval Mintz 	case 2:
565fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
566fe56b9e6SYuval Mintz 		break;
567fe56b9e6SYuval Mintz 	case 4:
568fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
569fe56b9e6SYuval Mintz 		break;
570fe56b9e6SYuval Mintz 	default:
571fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
572fe56b9e6SYuval Mintz 			  p_hwfn->cdev->num_ports_in_engines);
573fe56b9e6SYuval Mintz 		return;
574fe56b9e6SYuval Mintz 	}
575fe56b9e6SYuval Mintz 
576fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->mf_mode) {
577fc48b7a6SYuval Mintz 	case QED_MF_DEFAULT:
578fc48b7a6SYuval Mintz 	case QED_MF_NPAR:
579fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
580fe56b9e6SYuval Mintz 		break;
581fc48b7a6SYuval Mintz 	case QED_MF_OVLAN:
582fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SD;
583fc48b7a6SYuval Mintz 		break;
584fe56b9e6SYuval Mintz 	default:
585fc48b7a6SYuval Mintz 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
586fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
587fe56b9e6SYuval Mintz 	}
588fe56b9e6SYuval Mintz 
589fe56b9e6SYuval Mintz 	hw_mode |= 1 << MODE_ASIC;
590fe56b9e6SYuval Mintz 
5911af9dcf7SYuval Mintz 	if (p_hwfn->cdev->num_hwfns > 1)
5921af9dcf7SYuval Mintz 		hw_mode |= 1 << MODE_100G;
5931af9dcf7SYuval Mintz 
594fe56b9e6SYuval Mintz 	p_hwfn->hw_info.hw_mode = hw_mode;
5951af9dcf7SYuval Mintz 
5961af9dcf7SYuval Mintz 	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
5971af9dcf7SYuval Mintz 		   "Configuring function for hw_mode: 0x%08x\n",
5981af9dcf7SYuval Mintz 		   p_hwfn->hw_info.hw_mode);
599fe56b9e6SYuval Mintz }
600fe56b9e6SYuval Mintz 
601fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. */
602fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev)
603fe56b9e6SYuval Mintz {
604fe56b9e6SYuval Mintz 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
605fe56b9e6SYuval Mintz 	int i, sb_id;
606fe56b9e6SYuval Mintz 
607fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
608fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
609fe56b9e6SYuval Mintz 		struct qed_igu_info *p_igu_info;
610fe56b9e6SYuval Mintz 		struct qed_igu_block *p_block;
611fe56b9e6SYuval Mintz 		struct cau_sb_entry sb_entry;
612fe56b9e6SYuval Mintz 
613fe56b9e6SYuval Mintz 		p_igu_info = p_hwfn->hw_info.p_igu_info;
614fe56b9e6SYuval Mintz 
615fe56b9e6SYuval Mintz 		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
616fe56b9e6SYuval Mintz 		     sb_id++) {
617fe56b9e6SYuval Mintz 			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
618fe56b9e6SYuval Mintz 			if (!p_block->is_pf)
619fe56b9e6SYuval Mintz 				continue;
620fe56b9e6SYuval Mintz 
621fe56b9e6SYuval Mintz 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
622fe56b9e6SYuval Mintz 					      p_block->function_id,
623fe56b9e6SYuval Mintz 					      0, 0);
624fe56b9e6SYuval Mintz 			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
625fe56b9e6SYuval Mintz 					 sb_entry);
626fe56b9e6SYuval Mintz 		}
627fe56b9e6SYuval Mintz 	}
628fe56b9e6SYuval Mintz }
629fe56b9e6SYuval Mintz 
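/* Engine-phase initialization; invoked only by the first function to load on
 * the engine (load_code == FW_MSG_CODE_DRV_LOAD_ENGINE).
 */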
630fe56b9e6SYuval Mintz static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
631fe56b9e6SYuval Mintz 			      struct qed_ptt *p_ptt,
632fe56b9e6SYuval Mintz 			      int hw_mode)
633fe56b9e6SYuval Mintz {
634fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
635fe56b9e6SYuval Mintz 	struct qed_qm_common_rt_init_params params;
636fe56b9e6SYuval Mintz 	struct qed_dev *cdev = p_hwfn->cdev;
6371408cc1fSYuval Mintz 	u32 concrete_fid;
638fe56b9e6SYuval Mintz 	int rc = 0;
6391408cc1fSYuval Mintz 	u8 vf_id;
640fe56b9e6SYuval Mintz 
641fe56b9e6SYuval Mintz 	qed_init_cau_rt_data(cdev);
642fe56b9e6SYuval Mintz 
643fe56b9e6SYuval Mintz 	/* Program GTT windows */
644fe56b9e6SYuval Mintz 	qed_gtt_init(p_hwfn);
645fe56b9e6SYuval Mintz 
646fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
647fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.bandwidth_max)
648fe56b9e6SYuval Mintz 			qm_info->pf_rl_en = 1;
649fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.bandwidth_min)
650fe56b9e6SYuval Mintz 			qm_info->pf_wfq_en = 1;
651fe56b9e6SYuval Mintz 	}
652fe56b9e6SYuval Mintz 
653fe56b9e6SYuval Mintz 	memset(&params, 0, sizeof(params));
654fe56b9e6SYuval Mintz 	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
655fe56b9e6SYuval Mintz 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
656fe56b9e6SYuval Mintz 	params.pf_rl_en = qm_info->pf_rl_en;
657fe56b9e6SYuval Mintz 	params.pf_wfq_en = qm_info->pf_wfq_en;
658fe56b9e6SYuval Mintz 	params.vport_rl_en = qm_info->vport_rl_en;
659fe56b9e6SYuval Mintz 	params.vport_wfq_en = qm_info->vport_wfq_en;
660fe56b9e6SYuval Mintz 	params.port_params = qm_info->qm_port_params;
661fe56b9e6SYuval Mintz 
662fe56b9e6SYuval Mintz 	qed_qm_common_rt_init(p_hwfn, &params);
663fe56b9e6SYuval Mintz 
664fe56b9e6SYuval Mintz 	qed_cxt_hw_init_common(p_hwfn);
665fe56b9e6SYuval Mintz 
666fe56b9e6SYuval Mintz 	/* Close gate from NIG to BRB/Storm; By default they are open, but
667fe56b9e6SYuval Mintz 	 * we close them to prevent NIG from passing data to reset blocks.
668fe56b9e6SYuval Mintz 	 * Should have been done in the ENGINE phase, but init-tool lacks
669fe56b9e6SYuval Mintz 	 * proper port-pretend capabilities.
670fe56b9e6SYuval Mintz 	 */
671fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
672fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
673fe56b9e6SYuval Mintz 	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
674fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
675fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
676fe56b9e6SYuval Mintz 	qed_port_unpretend(p_hwfn, p_ptt);
677fe56b9e6SYuval Mintz 
678fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
679fe56b9e6SYuval Mintz 	if (rc != 0)
680fe56b9e6SYuval Mintz 		return rc;
681fe56b9e6SYuval Mintz 
682fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
683fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
684fe56b9e6SYuval Mintz 
685fe56b9e6SYuval Mintz 	/* Disable relaxed ordering in the PCI config space */
686fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, 0x20b4,
687fe56b9e6SYuval Mintz 	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
688fe56b9e6SYuval Mintz 
6891408cc1fSYuval Mintz 	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
6901408cc1fSYuval Mintz 		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
6911408cc1fSYuval Mintz 		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
6921408cc1fSYuval Mintz 		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
6931408cc1fSYuval Mintz 	}
6941408cc1fSYuval Mintz 	/* pretend to original PF */
6951408cc1fSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
6961408cc1fSYuval Mintz 
697fe56b9e6SYuval Mintz 	return rc;
698fe56b9e6SYuval Mintz }
699fe56b9e6SYuval Mintz 
700fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
701fe56b9e6SYuval Mintz 			    struct qed_ptt *p_ptt,
702fe56b9e6SYuval Mintz 			    int hw_mode)
703fe56b9e6SYuval Mintz {
704fe56b9e6SYuval Mintz 	int rc = 0;
705fe56b9e6SYuval Mintz 
706fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
707fe56b9e6SYuval Mintz 			  hw_mode);
708fe56b9e6SYuval Mintz 	return rc;
709fe56b9e6SYuval Mintz }
710fe56b9e6SYuval Mintz 
711fe56b9e6SYuval Mintz static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
712fe56b9e6SYuval Mintz 			  struct qed_ptt *p_ptt,
713464f6645SManish Chopra 			  struct qed_tunn_start_params *p_tunn,
714fe56b9e6SYuval Mintz 			  int hw_mode,
715fe56b9e6SYuval Mintz 			  bool b_hw_start,
716fe56b9e6SYuval Mintz 			  enum qed_int_mode int_mode,
717fe56b9e6SYuval Mintz 			  bool allow_npar_tx_switch)
718fe56b9e6SYuval Mintz {
719fe56b9e6SYuval Mintz 	u8 rel_pf_id = p_hwfn->rel_pf_id;
720fe56b9e6SYuval Mintz 	int rc = 0;
721fe56b9e6SYuval Mintz 
722fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
723fe56b9e6SYuval Mintz 		struct qed_mcp_function_info *p_info;
724fe56b9e6SYuval Mintz 
725fe56b9e6SYuval Mintz 		p_info = &p_hwfn->mcp_info->func_info;
726fe56b9e6SYuval Mintz 		if (p_info->bandwidth_min)
727fe56b9e6SYuval Mintz 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
728fe56b9e6SYuval Mintz 
729fe56b9e6SYuval Mintz 		/* Update the rate limit once we actually have a link */
7304b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
731fe56b9e6SYuval Mintz 	}
732fe56b9e6SYuval Mintz 
733fe56b9e6SYuval Mintz 	qed_cxt_hw_init_pf(p_hwfn);
734fe56b9e6SYuval Mintz 
735fe56b9e6SYuval Mintz 	qed_int_igu_init_rt(p_hwfn);
736fe56b9e6SYuval Mintz 
737fe56b9e6SYuval Mintz 	/* Set VLAN in NIG if needed */
738fe56b9e6SYuval Mintz 	if (hw_mode & (1 << MODE_MF_SD)) {
739fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
740fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
741fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
742fe56b9e6SYuval Mintz 			     p_hwfn->hw_info.ovlan);
743fe56b9e6SYuval Mintz 	}
744fe56b9e6SYuval Mintz 
745fe56b9e6SYuval Mintz 	/* Enable classification by MAC if needed */
74687aec47dSDan Carpenter 	if (hw_mode & (1 << MODE_MF_SI)) {
747fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
748fe56b9e6SYuval Mintz 			   "Configuring TAGMAC_CLS_TYPE\n");
749fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn,
750fe56b9e6SYuval Mintz 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
751fe56b9e6SYuval Mintz 	}
752fe56b9e6SYuval Mintz 
753fe56b9e6SYuval Mintz 	/* Protocol Configuration */
754fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
755fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
756fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
757fe56b9e6SYuval Mintz 
758fe56b9e6SYuval Mintz 	/* Cleanup chip from previous driver if such remains exist */
7590b55e27dSYuval Mintz 	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
760fe56b9e6SYuval Mintz 	if (rc != 0)
761fe56b9e6SYuval Mintz 		return rc;
762fe56b9e6SYuval Mintz 
763fe56b9e6SYuval Mintz 	/* PF Init sequence */
764fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
765fe56b9e6SYuval Mintz 	if (rc)
766fe56b9e6SYuval Mintz 		return rc;
767fe56b9e6SYuval Mintz 
768fe56b9e6SYuval Mintz 	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
769fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
770fe56b9e6SYuval Mintz 	if (rc)
771fe56b9e6SYuval Mintz 		return rc;
772fe56b9e6SYuval Mintz 
773fe56b9e6SYuval Mintz 	/* Pure runtime initializations - directly to the HW  */
774fe56b9e6SYuval Mintz 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
775fe56b9e6SYuval Mintz 
776fe56b9e6SYuval Mintz 	if (b_hw_start) {
777fe56b9e6SYuval Mintz 		/* enable interrupts */
778fe56b9e6SYuval Mintz 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
779fe56b9e6SYuval Mintz 
780fe56b9e6SYuval Mintz 		/* send function start command */
781831bfb0eSYuval Mintz 		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
782831bfb0eSYuval Mintz 				     allow_npar_tx_switch);
783fe56b9e6SYuval Mintz 		if (rc)
784fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
785fe56b9e6SYuval Mintz 	}
786fe56b9e6SYuval Mintz 	return rc;
787fe56b9e6SYuval Mintz }
788fe56b9e6SYuval Mintz 
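/* Toggle the hw-function's master-enable bit in PGLUE_B and poll (for up to a
 * second) until the new value takes effect.
 */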
789fe56b9e6SYuval Mintz static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
790fe56b9e6SYuval Mintz 			       struct qed_ptt *p_ptt,
791fe56b9e6SYuval Mintz 			       u8 enable)
792fe56b9e6SYuval Mintz {
793fe56b9e6SYuval Mintz 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
794fe56b9e6SYuval Mintz 
795fe56b9e6SYuval Mintz 	/* Change PF in PXP */
796fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt,
797fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
798fe56b9e6SYuval Mintz 
799fe56b9e6SYuval Mintz 	/* wait until value is set - try for 1 second every 50us */
800fe56b9e6SYuval Mintz 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
801fe56b9e6SYuval Mintz 		val = qed_rd(p_hwfn, p_ptt,
802fe56b9e6SYuval Mintz 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
803fe56b9e6SYuval Mintz 		if (val == set_val)
804fe56b9e6SYuval Mintz 			break;
805fe56b9e6SYuval Mintz 
806fe56b9e6SYuval Mintz 		usleep_range(50, 60);
807fe56b9e6SYuval Mintz 	}
808fe56b9e6SYuval Mintz 
809fe56b9e6SYuval Mintz 	if (val != set_val) {
810fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
811fe56b9e6SYuval Mintz 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
812fe56b9e6SYuval Mintz 		return -EAGAIN;
813fe56b9e6SYuval Mintz 	}
814fe56b9e6SYuval Mintz 
815fe56b9e6SYuval Mintz 	return 0;
816fe56b9e6SYuval Mintz }
817fe56b9e6SYuval Mintz 
818fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
819fe56b9e6SYuval Mintz 				struct qed_ptt *p_main_ptt)
820fe56b9e6SYuval Mintz {
821fe56b9e6SYuval Mintz 	/* Read shadow of current MFW mailbox */
822fe56b9e6SYuval Mintz 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
823fe56b9e6SYuval Mintz 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
824fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_cur,
825fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_length);
826fe56b9e6SYuval Mintz }
827fe56b9e6SYuval Mintz 
828fe56b9e6SYuval Mintz int qed_hw_init(struct qed_dev *cdev,
829464f6645SManish Chopra 		struct qed_tunn_start_params *p_tunn,
830fe56b9e6SYuval Mintz 		bool b_hw_start,
831fe56b9e6SYuval Mintz 		enum qed_int_mode int_mode,
832fe56b9e6SYuval Mintz 		bool allow_npar_tx_switch,
833fe56b9e6SYuval Mintz 		const u8 *bin_fw_data)
834fe56b9e6SYuval Mintz {
83586622ee7SYuval Mintz 	u32 load_code, param;
836fe56b9e6SYuval Mintz 	int rc, mfw_rc, i;
837fe56b9e6SYuval Mintz 
838bb13ace7SSudarsana Reddy Kalluru 	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
839bb13ace7SSudarsana Reddy Kalluru 		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
840bb13ace7SSudarsana Reddy Kalluru 		return -EINVAL;
841bb13ace7SSudarsana Reddy Kalluru 	}
842bb13ace7SSudarsana Reddy Kalluru 
8431408cc1fSYuval Mintz 	if (IS_PF(cdev)) {
844fe56b9e6SYuval Mintz 		rc = qed_init_fw_data(cdev, bin_fw_data);
845fe56b9e6SYuval Mintz 		if (rc != 0)
846fe56b9e6SYuval Mintz 			return rc;
8471408cc1fSYuval Mintz 	}
848fe56b9e6SYuval Mintz 
849fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
850fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
851fe56b9e6SYuval Mintz 
8521408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
8531408cc1fSYuval Mintz 			p_hwfn->b_int_enabled = 1;
8541408cc1fSYuval Mintz 			continue;
8551408cc1fSYuval Mintz 		}
8561408cc1fSYuval Mintz 
857fe56b9e6SYuval Mintz 		/* Enable DMAE in PXP */
858fe56b9e6SYuval Mintz 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
859fe56b9e6SYuval Mintz 
860fe56b9e6SYuval Mintz 		qed_calc_hw_mode(p_hwfn);
861fe56b9e6SYuval Mintz 
862fe56b9e6SYuval Mintz 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
863fe56b9e6SYuval Mintz 				      &load_code);
864fe56b9e6SYuval Mintz 		if (rc) {
865fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
866fe56b9e6SYuval Mintz 			return rc;
867fe56b9e6SYuval Mintz 		}
868fe56b9e6SYuval Mintz 
869fe56b9e6SYuval Mintz 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
870fe56b9e6SYuval Mintz 
871fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
872fe56b9e6SYuval Mintz 			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
873fe56b9e6SYuval Mintz 			   rc, load_code);
874fe56b9e6SYuval Mintz 
875fe56b9e6SYuval Mintz 		p_hwfn->first_on_engine = (load_code ==
876fe56b9e6SYuval Mintz 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
877fe56b9e6SYuval Mintz 
87839651abdSSudarsana Reddy Kalluru 		if (!qm_lock_init) {
87939651abdSSudarsana Reddy Kalluru 			spin_lock_init(&qm_lock);
88039651abdSSudarsana Reddy Kalluru 			qm_lock_init = true;
88139651abdSSudarsana Reddy Kalluru 		}
88239651abdSSudarsana Reddy Kalluru 
883fe56b9e6SYuval Mintz 		switch (load_code) {
884fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
885fe56b9e6SYuval Mintz 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
886fe56b9e6SYuval Mintz 						p_hwfn->hw_info.hw_mode);
887fe56b9e6SYuval Mintz 			if (rc)
888fe56b9e6SYuval Mintz 				break;
889fe56b9e6SYuval Mintz 		/* Fall into */
890fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_PORT:
891fe56b9e6SYuval Mintz 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
892fe56b9e6SYuval Mintz 					      p_hwfn->hw_info.hw_mode);
893fe56b9e6SYuval Mintz 			if (rc)
894fe56b9e6SYuval Mintz 				break;
895fe56b9e6SYuval Mintz 
896fe56b9e6SYuval Mintz 		/* Fall into */
897fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
898fe56b9e6SYuval Mintz 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
899464f6645SManish Chopra 					    p_tunn, p_hwfn->hw_info.hw_mode,
900fe56b9e6SYuval Mintz 					    b_hw_start, int_mode,
901fe56b9e6SYuval Mintz 					    allow_npar_tx_switch);
902fe56b9e6SYuval Mintz 			break;
903fe56b9e6SYuval Mintz 		default:
904fe56b9e6SYuval Mintz 			rc = -EINVAL;
905fe56b9e6SYuval Mintz 			break;
906fe56b9e6SYuval Mintz 		}
907fe56b9e6SYuval Mintz 
908fe56b9e6SYuval Mintz 		if (rc)
909fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
910fe56b9e6SYuval Mintz 				  "init phase failed for loadcode 0x%x (rc %d)\n",
911fe56b9e6SYuval Mintz 				   load_code, rc);
912fe56b9e6SYuval Mintz 
913fe56b9e6SYuval Mintz 		/* ACK mfw regardless of success or failure of initialization */
914fe56b9e6SYuval Mintz 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
915fe56b9e6SYuval Mintz 				     DRV_MSG_CODE_LOAD_DONE,
916fe56b9e6SYuval Mintz 				     0, &load_code, &param);
917fe56b9e6SYuval Mintz 		if (rc)
918fe56b9e6SYuval Mintz 			return rc;
919fe56b9e6SYuval Mintz 		if (mfw_rc) {
920fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
921fe56b9e6SYuval Mintz 			return mfw_rc;
922fe56b9e6SYuval Mintz 		}
923fe56b9e6SYuval Mintz 
92439651abdSSudarsana Reddy Kalluru 		/* send DCBX attention request command */
92539651abdSSudarsana Reddy Kalluru 		DP_VERBOSE(p_hwfn,
92639651abdSSudarsana Reddy Kalluru 			   QED_MSG_DCB,
92739651abdSSudarsana Reddy Kalluru 			   "sending phony dcbx set command to trigger DCBx attention handling\n");
92839651abdSSudarsana Reddy Kalluru 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
92939651abdSSudarsana Reddy Kalluru 				     DRV_MSG_CODE_SET_DCBX,
93039651abdSSudarsana Reddy Kalluru 				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
93139651abdSSudarsana Reddy Kalluru 				     &load_code, &param);
93239651abdSSudarsana Reddy Kalluru 		if (mfw_rc) {
93339651abdSSudarsana Reddy Kalluru 			DP_NOTICE(p_hwfn,
93439651abdSSudarsana Reddy Kalluru 				  "Failed to send DCBX attention request\n");
93539651abdSSudarsana Reddy Kalluru 			return mfw_rc;
93639651abdSSudarsana Reddy Kalluru 		}
93739651abdSSudarsana Reddy Kalluru 
938fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = true;
939fe56b9e6SYuval Mintz 	}
940fe56b9e6SYuval Mintz 
941fe56b9e6SYuval Mintz 	return 0;
942fe56b9e6SYuval Mintz }
943fe56b9e6SYuval Mintz 
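/* Close the connection/task timers and poll both linear scans until they are
 * idle (or the retry limit expires) before the PF is stopped.
 */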
944fe56b9e6SYuval Mintz #define QED_HW_STOP_RETRY_LIMIT (10)
9458c925c44SYuval Mintz static inline void qed_hw_timers_stop(struct qed_dev *cdev,
9468c925c44SYuval Mintz 				      struct qed_hwfn *p_hwfn,
9478c925c44SYuval Mintz 				      struct qed_ptt *p_ptt)
9488c925c44SYuval Mintz {
9498c925c44SYuval Mintz 	int i;
9508c925c44SYuval Mintz 
9518c925c44SYuval Mintz 	/* close timers */
9528c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
9538c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
9548c925c44SYuval Mintz 
9558c925c44SYuval Mintz 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
9568c925c44SYuval Mintz 		if ((!qed_rd(p_hwfn, p_ptt,
9578c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
9588c925c44SYuval Mintz 		    (!qed_rd(p_hwfn, p_ptt,
9598c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_TASK)))
9608c925c44SYuval Mintz 			break;
9618c925c44SYuval Mintz 
9628c925c44SYuval Mintz 		/* Depending on the number of connections/tasks, a 1ms
9638c925c44SYuval Mintz 		 * sleep may be required between polls.
9648c925c44SYuval Mintz 		 */
9658c925c44SYuval Mintz 		usleep_range(1000, 2000);
9668c925c44SYuval Mintz 	}
9678c925c44SYuval Mintz 
9688c925c44SYuval Mintz 	if (i < QED_HW_STOP_RETRY_LIMIT)
9698c925c44SYuval Mintz 		return;
9708c925c44SYuval Mintz 
9718c925c44SYuval Mintz 	DP_NOTICE(p_hwfn,
9728c925c44SYuval Mintz 		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
9738c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
9748c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
9758c925c44SYuval Mintz }
9768c925c44SYuval Mintz 
9778c925c44SYuval Mintz void qed_hw_timers_stop_all(struct qed_dev *cdev)
9788c925c44SYuval Mintz {
9798c925c44SYuval Mintz 	int j;
9808c925c44SYuval Mintz 
9818c925c44SYuval Mintz 	for_each_hwfn(cdev, j) {
9828c925c44SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
9838c925c44SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
9848c925c44SYuval Mintz 
9858c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
9868c925c44SYuval Mintz 	}
9878c925c44SYuval Mintz }
9888c925c44SYuval Mintz 
989fe56b9e6SYuval Mintz int qed_hw_stop(struct qed_dev *cdev)
990fe56b9e6SYuval Mintz {
991fe56b9e6SYuval Mintz 	int rc = 0, t_rc;
9928c925c44SYuval Mintz 	int j;
993fe56b9e6SYuval Mintz 
994fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, j) {
995fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
996fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
997fe56b9e6SYuval Mintz 
998fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
999fe56b9e6SYuval Mintz 
10001408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
10010b55e27dSYuval Mintz 			qed_vf_pf_int_cleanup(p_hwfn);
10021408cc1fSYuval Mintz 			continue;
10031408cc1fSYuval Mintz 		}
10041408cc1fSYuval Mintz 
1005fe56b9e6SYuval Mintz 		/* mark the hw as uninitialized... */
1006fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = false;
1007fe56b9e6SYuval Mintz 
1008fe56b9e6SYuval Mintz 		rc = qed_sp_pf_stop(p_hwfn);
1009fe56b9e6SYuval Mintz 		if (rc)
10108c925c44SYuval Mintz 			DP_NOTICE(p_hwfn,
10118c925c44SYuval Mintz 				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
1012fe56b9e6SYuval Mintz 
1013fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt,
1014fe56b9e6SYuval Mintz 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1015fe56b9e6SYuval Mintz 
1016fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1017fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1018fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
1019fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
1020fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1021fe56b9e6SYuval Mintz 
10228c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
1023fe56b9e6SYuval Mintz 
1024fe56b9e6SYuval Mintz 		/* Disable Attention Generation */
1025fe56b9e6SYuval Mintz 		qed_int_igu_disable_int(p_hwfn, p_ptt);
1026fe56b9e6SYuval Mintz 
1027fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
1028fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
1029fe56b9e6SYuval Mintz 
1030fe56b9e6SYuval Mintz 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
1031fe56b9e6SYuval Mintz 
1032fe56b9e6SYuval Mintz 		/* Need to wait 1ms to guarantee SBs are cleared */
1033fe56b9e6SYuval Mintz 		usleep_range(1000, 2000);
1034fe56b9e6SYuval Mintz 	}
1035fe56b9e6SYuval Mintz 
10361408cc1fSYuval Mintz 	if (IS_PF(cdev)) {
1037fe56b9e6SYuval Mintz 		/* Disable DMAE in PXP - in CMT, this should only be done for
1038fe56b9e6SYuval Mintz 		 * first hw-function, and only after all transactions have
1039fe56b9e6SYuval Mintz 		 * stopped for all active hw-functions.
1040fe56b9e6SYuval Mintz 		 */
1041fe56b9e6SYuval Mintz 		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
10421408cc1fSYuval Mintz 					   cdev->hwfns[0].p_main_ptt, false);
1043fe56b9e6SYuval Mintz 		if (t_rc != 0)
1044fe56b9e6SYuval Mintz 			rc = t_rc;
10451408cc1fSYuval Mintz 	}
1046fe56b9e6SYuval Mintz 
1047fe56b9e6SYuval Mintz 	return rc;
1048fe56b9e6SYuval Mintz }
1049fe56b9e6SYuval Mintz 
1050cee4d264SManish Chopra void qed_hw_stop_fastpath(struct qed_dev *cdev)
1051cee4d264SManish Chopra {
10528c925c44SYuval Mintz 	int j;
1053cee4d264SManish Chopra 
1054cee4d264SManish Chopra 	for_each_hwfn(cdev, j) {
1055cee4d264SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
1056cee4d264SManish Chopra 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
1057cee4d264SManish Chopra 
1058dacd88d6SYuval Mintz 		if (IS_VF(cdev)) {
1059dacd88d6SYuval Mintz 			qed_vf_pf_int_cleanup(p_hwfn);
1060dacd88d6SYuval Mintz 			continue;
1061dacd88d6SYuval Mintz 		}
1062dacd88d6SYuval Mintz 
1063cee4d264SManish Chopra 		DP_VERBOSE(p_hwfn,
1064cee4d264SManish Chopra 			   NETIF_MSG_IFDOWN,
1065cee4d264SManish Chopra 			   "Shutting down the fastpath\n");
1066cee4d264SManish Chopra 
1067cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt,
1068cee4d264SManish Chopra 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1069cee4d264SManish Chopra 
1070cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1071cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1072cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
1073cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
1074cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1075cee4d264SManish Chopra 
1076cee4d264SManish Chopra 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
1077cee4d264SManish Chopra 
1078cee4d264SManish Chopra 		/* Need to wait 1ms to guarantee SBs are cleared */
1079cee4d264SManish Chopra 		usleep_range(1000, 2000);
1080cee4d264SManish Chopra 	}
1081cee4d264SManish Chopra }
1082cee4d264SManish Chopra 
1083cee4d264SManish Chopra void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
1084cee4d264SManish Chopra {
1085dacd88d6SYuval Mintz 	if (IS_VF(p_hwfn->cdev))
1086dacd88d6SYuval Mintz 		return;
1087dacd88d6SYuval Mintz 
1088cee4d264SManish Chopra 	/* Re-open incoming traffic */
1089cee4d264SManish Chopra 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1090cee4d264SManish Chopra 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
1091cee4d264SManish Chopra }
1092cee4d264SManish Chopra 
1093fe56b9e6SYuval Mintz static int qed_reg_assert(struct qed_hwfn *hwfn,
1094fe56b9e6SYuval Mintz 			  struct qed_ptt *ptt, u32 reg,
1095fe56b9e6SYuval Mintz 			  bool expected)
1096fe56b9e6SYuval Mintz {
1097fe56b9e6SYuval Mintz 	u32 assert_val = qed_rd(hwfn, ptt, reg);
1098fe56b9e6SYuval Mintz 
1099fe56b9e6SYuval Mintz 	if (assert_val != expected) {
1100fe56b9e6SYuval Mintz 		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
1101fe56b9e6SYuval Mintz 			  reg, expected);
1102fe56b9e6SYuval Mintz 		return -EINVAL;
1103fe56b9e6SYuval Mintz 	}
1104fe56b9e6SYuval Mintz 
1105fe56b9e6SYuval Mintz 	return 0;
1106fe56b9e6SYuval Mintz }
1107fe56b9e6SYuval Mintz 
1108fe56b9e6SYuval Mintz int qed_hw_reset(struct qed_dev *cdev)
1109fe56b9e6SYuval Mintz {
1110fe56b9e6SYuval Mintz 	int rc = 0;
1111fe56b9e6SYuval Mintz 	u32 unload_resp, unload_param;
1112fe56b9e6SYuval Mintz 	int i;
1113fe56b9e6SYuval Mintz 
1114fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1115fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1116fe56b9e6SYuval Mintz 
11171408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
11180b55e27dSYuval Mintz 			rc = qed_vf_pf_reset(p_hwfn);
11190b55e27dSYuval Mintz 			if (rc)
11200b55e27dSYuval Mintz 				return rc;
11211408cc1fSYuval Mintz 			continue;
11221408cc1fSYuval Mintz 		}
11231408cc1fSYuval Mintz 
1124fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
1125fe56b9e6SYuval Mintz 
1126fe56b9e6SYuval Mintz 		/* Check for incorrect states */
1127fe56b9e6SYuval Mintz 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1128fe56b9e6SYuval Mintz 			       QM_REG_USG_CNT_PF_TX, 0);
1129fe56b9e6SYuval Mintz 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1130fe56b9e6SYuval Mintz 			       QM_REG_USG_CNT_PF_OTHER, 0);
1131fe56b9e6SYuval Mintz 
1132fe56b9e6SYuval Mintz 		/* Disable PF in HW blocks */
1133fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
1134fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
1135fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1136fe56b9e6SYuval Mintz 		       TCFC_REG_STRONG_ENABLE_PF, 0);
1137fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1138fe56b9e6SYuval Mintz 		       CCFC_REG_STRONG_ENABLE_PF, 0);
1139fe56b9e6SYuval Mintz 
1140fe56b9e6SYuval Mintz 		/* Send unload command to MCP */
1141fe56b9e6SYuval Mintz 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1142fe56b9e6SYuval Mintz 				 DRV_MSG_CODE_UNLOAD_REQ,
1143fe56b9e6SYuval Mintz 				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
1144fe56b9e6SYuval Mintz 				 &unload_resp, &unload_param);
1145fe56b9e6SYuval Mintz 		if (rc) {
1146fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
1147fe56b9e6SYuval Mintz 			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
1148fe56b9e6SYuval Mintz 		}
1149fe56b9e6SYuval Mintz 
1150fe56b9e6SYuval Mintz 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1151fe56b9e6SYuval Mintz 				 DRV_MSG_CODE_UNLOAD_DONE,
1152fe56b9e6SYuval Mintz 				 0, &unload_resp, &unload_param);
1153fe56b9e6SYuval Mintz 		if (rc) {
1154fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
1155fe56b9e6SYuval Mintz 			return rc;
1156fe56b9e6SYuval Mintz 		}
1157fe56b9e6SYuval Mintz 	}
1158fe56b9e6SYuval Mintz 
1159fe56b9e6SYuval Mintz 	return rc;
1160fe56b9e6SYuval Mintz }
1161fe56b9e6SYuval Mintz 
1162fe56b9e6SYuval Mintz /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
1163fe56b9e6SYuval Mintz static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
1164fe56b9e6SYuval Mintz {
1165fe56b9e6SYuval Mintz 	qed_ptt_pool_free(p_hwfn);
1166fe56b9e6SYuval Mintz 	kfree(p_hwfn->hw_info.p_igu_info);
1167fe56b9e6SYuval Mintz }
1168fe56b9e6SYuval Mintz 
1169fe56b9e6SYuval Mintz /* Setup bar access */
117012e09c69SYuval Mintz static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
1171fe56b9e6SYuval Mintz {
1172fe56b9e6SYuval Mintz 	/* clear indirect access */
1173fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
1174fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
1175fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
1176fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
1177fe56b9e6SYuval Mintz 
1178fe56b9e6SYuval Mintz 	/* Clean previous errors, if any exist */
1179fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1180fe56b9e6SYuval Mintz 	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
1181fe56b9e6SYuval Mintz 	       1 << p_hwfn->abs_pf_id);
1182fe56b9e6SYuval Mintz 
1183fe56b9e6SYuval Mintz 	/* enable internal target-read */
1184fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1185fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1186fe56b9e6SYuval Mintz }
1187fe56b9e6SYuval Mintz 
1188fe56b9e6SYuval Mintz static void get_function_id(struct qed_hwfn *p_hwfn)
1189fe56b9e6SYuval Mintz {
1190fe56b9e6SYuval Mintz 	/* ME Register */
1191fe56b9e6SYuval Mintz 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
1192fe56b9e6SYuval Mintz 
1193fe56b9e6SYuval Mintz 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1194fe56b9e6SYuval Mintz 
1195fe56b9e6SYuval Mintz 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1196fe56b9e6SYuval Mintz 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1197fe56b9e6SYuval Mintz 				      PXP_CONCRETE_FID_PFID);
1198fe56b9e6SYuval Mintz 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1199fe56b9e6SYuval Mintz 				    PXP_CONCRETE_FID_PORT);
1200fe56b9e6SYuval Mintz }
1201fe56b9e6SYuval Mintz 
120225c089d7SYuval Mintz static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
120325c089d7SYuval Mintz {
120425c089d7SYuval Mintz 	u32 *feat_num = p_hwfn->hw_info.feat_num;
120525c089d7SYuval Mintz 	int num_features = 1;
120625c089d7SYuval Mintz 
120725c089d7SYuval Mintz 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
120825c089d7SYuval Mintz 						num_features,
120925c089d7SYuval Mintz 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
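	/* Illustrative arithmetic (assumed numbers, not read from a device):
	 * with RESC_NUM(QED_SB) = 16, RESC_NUM(QED_L2_QUEUE) = 8 and
	 * num_features = 1, the split above yields
	 * feat_num[QED_PF_L2_QUE] = min(16 / 1, 8) = 8.
	 */
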
121025c089d7SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
121125c089d7SYuval Mintz 		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
121225c089d7SYuval Mintz 		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
121325c089d7SYuval Mintz 		   num_features);
121425c089d7SYuval Mintz }
121525c089d7SYuval Mintz 
1216fe56b9e6SYuval Mintz static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1217fe56b9e6SYuval Mintz {
1218fe56b9e6SYuval Mintz 	u32 *resc_start = p_hwfn->hw_info.resc_start;
12191408cc1fSYuval Mintz 	u8 num_funcs = p_hwfn->num_funcs_on_engine;
1220fe56b9e6SYuval Mintz 	u32 *resc_num = p_hwfn->hw_info.resc_num;
12214ac801b7SYuval Mintz 	struct qed_sb_cnt_info sb_cnt_info;
122208feecd7SYuval Mintz 	int i, max_vf_vlan_filters;
1223fe56b9e6SYuval Mintz 
12244ac801b7SYuval Mintz 	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
122508feecd7SYuval Mintz 
122608feecd7SYuval Mintz #ifdef CONFIG_QED_SRIOV
122708feecd7SYuval Mintz 	max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
122808feecd7SYuval Mintz #else
122908feecd7SYuval Mintz 	max_vf_vlan_filters = 0;
123008feecd7SYuval Mintz #endif
123108feecd7SYuval Mintz 
12324ac801b7SYuval Mintz 	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
12334ac801b7SYuval Mintz 
1234fe56b9e6SYuval Mintz 	resc_num[QED_SB] = min_t(u32,
1235fe56b9e6SYuval Mintz 				 (MAX_SB_PER_PATH_BB / num_funcs),
12364ac801b7SYuval Mintz 				 sb_cnt_info.sb_cnt);
123725c089d7SYuval Mintz 	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
1238fe56b9e6SYuval Mintz 	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
123925c089d7SYuval Mintz 	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1240fe56b9e6SYuval Mintz 	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1241fe56b9e6SYuval Mintz 	resc_num[QED_RL] = 8;
124225c089d7SYuval Mintz 	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
124325c089d7SYuval Mintz 	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
124425c089d7SYuval Mintz 			     num_funcs;
1245fe56b9e6SYuval Mintz 	resc_num[QED_ILT] = 950;
1246fe56b9e6SYuval Mintz 
1247fe56b9e6SYuval Mintz 	for (i = 0; i < QED_MAX_RESC; i++)
1248fe56b9e6SYuval Mintz 		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1249fe56b9e6SYuval Mintz 
125025c089d7SYuval Mintz 	qed_hw_set_feat(p_hwfn);
125125c089d7SYuval Mintz 
1252fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1253fe56b9e6SYuval Mintz 		   "The numbers for each resource are:\n"
1254fe56b9e6SYuval Mintz 		   "SB = %d start = %d\n"
125525c089d7SYuval Mintz 		   "L2_QUEUE = %d start = %d\n"
1256fe56b9e6SYuval Mintz 		   "VPORT = %d start = %d\n"
1257fe56b9e6SYuval Mintz 		   "PQ = %d start = %d\n"
1258fe56b9e6SYuval Mintz 		   "RL = %d start = %d\n"
125925c089d7SYuval Mintz 		   "MAC = %d start = %d\n"
126025c089d7SYuval Mintz 		   "VLAN = %d start = %d\n"
1261fe56b9e6SYuval Mintz 		   "ILT = %d start = %d\n",
1262fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_SB],
1263fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_SB],
126425c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
126525c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
1266fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_VPORT],
1267fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_VPORT],
1268fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_PQ],
1269fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_PQ],
1270fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_RL],
1271fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_RL],
127225c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_MAC],
127325c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_MAC],
127425c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_VLAN],
127525c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_VLAN],
1276fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_ILT],
1277fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_ILT]);
1278fe56b9e6SYuval Mintz }
1279fe56b9e6SYuval Mintz 
1280fe56b9e6SYuval Mintz static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1281fe56b9e6SYuval Mintz 			       struct qed_ptt *p_ptt)
1282fe56b9e6SYuval Mintz {
1283cc875c2eSYuval Mintz 	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1284fc48b7a6SYuval Mintz 	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
1285cc875c2eSYuval Mintz 	struct qed_mcp_link_params *link;
1286fe56b9e6SYuval Mintz 
1287fe56b9e6SYuval Mintz 	/* Read global nvm_cfg address */
1288fe56b9e6SYuval Mintz 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1289fe56b9e6SYuval Mintz 
1290fe56b9e6SYuval Mintz 	/* Verify MCP has initialized it */
1291fe56b9e6SYuval Mintz 	if (!nvm_cfg_addr) {
1292fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1293fe56b9e6SYuval Mintz 		return -EINVAL;
1294fe56b9e6SYuval Mintz 	}
1295fe56b9e6SYuval Mintz 
1296fe56b9e6SYuval Mintz 	/* Read nvm_cfg1 (note this is just the offset, not the offsize (TBD)) */
1297fe56b9e6SYuval Mintz 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1298fe56b9e6SYuval Mintz 
1299cc875c2eSYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1300cc875c2eSYuval Mintz 	       offsetof(struct nvm_cfg1, glob) +
1301cc875c2eSYuval Mintz 	       offsetof(struct nvm_cfg1_glob, core_cfg);
1302cc875c2eSYuval Mintz 
1303cc875c2eSYuval Mintz 	core_cfg = qed_rd(p_hwfn, p_ptt, addr);
1304cc875c2eSYuval Mintz 
1305cc875c2eSYuval Mintz 	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1306cc875c2eSYuval Mintz 		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1307cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1308cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1309cc875c2eSYuval Mintz 		break;
1310cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1311cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1312cc875c2eSYuval Mintz 		break;
1313cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1314cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1315cc875c2eSYuval Mintz 		break;
1316cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1317cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1318cc875c2eSYuval Mintz 		break;
1319cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1320cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1321cc875c2eSYuval Mintz 		break;
1322cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1323cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1324cc875c2eSYuval Mintz 		break;
1325cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1326cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1327cc875c2eSYuval Mintz 		break;
1328cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1329cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1330cc875c2eSYuval Mintz 		break;
1331cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1332cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1333cc875c2eSYuval Mintz 		break;
1334cc875c2eSYuval Mintz 	default:
1335cc875c2eSYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
1336cc875c2eSYuval Mintz 			  core_cfg);
1337cc875c2eSYuval Mintz 		break;
1338cc875c2eSYuval Mintz 	}
1339cc875c2eSYuval Mintz 
1340cc875c2eSYuval Mintz 	/* Read default link configuration */
1341cc875c2eSYuval Mintz 	link = &p_hwfn->mcp_info->link_input;
1342cc875c2eSYuval Mintz 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1343cc875c2eSYuval Mintz 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1344cc875c2eSYuval Mintz 	link_temp = qed_rd(p_hwfn, p_ptt,
1345cc875c2eSYuval Mintz 			   port_cfg_addr +
1346cc875c2eSYuval Mintz 			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
1347cc875c2eSYuval Mintz 	link->speed.advertised_speeds =
1348cc875c2eSYuval Mintz 		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1349cc875c2eSYuval Mintz 
1350cc875c2eSYuval Mintz 	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
1351cc875c2eSYuval Mintz 						link->speed.advertised_speeds;
1352cc875c2eSYuval Mintz 
1353cc875c2eSYuval Mintz 	link_temp = qed_rd(p_hwfn, p_ptt,
1354cc875c2eSYuval Mintz 			   port_cfg_addr +
1355cc875c2eSYuval Mintz 			   offsetof(struct nvm_cfg1_port, link_settings));
1356cc875c2eSYuval Mintz 	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1357cc875c2eSYuval Mintz 		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1358cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1359cc875c2eSYuval Mintz 		link->speed.autoneg = true;
1360cc875c2eSYuval Mintz 		break;
1361cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1362cc875c2eSYuval Mintz 		link->speed.forced_speed = 1000;
1363cc875c2eSYuval Mintz 		break;
1364cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1365cc875c2eSYuval Mintz 		link->speed.forced_speed = 10000;
1366cc875c2eSYuval Mintz 		break;
1367cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1368cc875c2eSYuval Mintz 		link->speed.forced_speed = 25000;
1369cc875c2eSYuval Mintz 		break;
1370cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1371cc875c2eSYuval Mintz 		link->speed.forced_speed = 40000;
1372cc875c2eSYuval Mintz 		break;
1373cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1374cc875c2eSYuval Mintz 		link->speed.forced_speed = 50000;
1375cc875c2eSYuval Mintz 		break;
1376cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1377cc875c2eSYuval Mintz 		link->speed.forced_speed = 100000;
1378cc875c2eSYuval Mintz 		break;
1379cc875c2eSYuval Mintz 	default:
1380cc875c2eSYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
1381cc875c2eSYuval Mintz 			  link_temp);
1382cc875c2eSYuval Mintz 	}
1383cc875c2eSYuval Mintz 
1384cc875c2eSYuval Mintz 	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1385cc875c2eSYuval Mintz 	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1386cc875c2eSYuval Mintz 	link->pause.autoneg = !!(link_temp &
1387cc875c2eSYuval Mintz 				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1388cc875c2eSYuval Mintz 	link->pause.forced_rx = !!(link_temp &
1389cc875c2eSYuval Mintz 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1390cc875c2eSYuval Mintz 	link->pause.forced_tx = !!(link_temp &
1391cc875c2eSYuval Mintz 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1392cc875c2eSYuval Mintz 	link->loopback_mode = 0;
1393cc875c2eSYuval Mintz 
1394cc875c2eSYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1395cc875c2eSYuval Mintz 		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
1396cc875c2eSYuval Mintz 		   link->speed.forced_speed, link->speed.advertised_speeds,
1397cc875c2eSYuval Mintz 		   link->speed.autoneg, link->pause.autoneg);
1398cc875c2eSYuval Mintz 
1399fe56b9e6SYuval Mintz 	/* Read Multi-function information from shmem */
1400fe56b9e6SYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1401fe56b9e6SYuval Mintz 	       offsetof(struct nvm_cfg1, glob) +
1402fe56b9e6SYuval Mintz 	       offsetof(struct nvm_cfg1_glob, generic_cont0);
1403fe56b9e6SYuval Mintz 
1404fe56b9e6SYuval Mintz 	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
1405fe56b9e6SYuval Mintz 
1406fe56b9e6SYuval Mintz 	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1407fe56b9e6SYuval Mintz 		  NVM_CFG1_GLOB_MF_MODE_OFFSET;
1408fe56b9e6SYuval Mintz 
1409fe56b9e6SYuval Mintz 	switch (mf_mode) {
1410fe56b9e6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
1411fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
1412fe56b9e6SYuval Mintz 		break;
1413fe56b9e6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
1414fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
1415fe56b9e6SYuval Mintz 		break;
1416fc48b7a6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1417fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
1418fe56b9e6SYuval Mintz 		break;
1419fe56b9e6SYuval Mintz 	}
1420fe56b9e6SYuval Mintz 	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1421fe56b9e6SYuval Mintz 		p_hwfn->cdev->mf_mode);
1422fe56b9e6SYuval Mintz 
1423fc48b7a6SYuval Mintz 	/* Read device capabilities information from shmem */
1424fc48b7a6SYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1425fc48b7a6SYuval Mintz 		offsetof(struct nvm_cfg1, glob) +
1426fc48b7a6SYuval Mintz 		offsetof(struct nvm_cfg1_glob, device_capabilities);
1427fc48b7a6SYuval Mintz 
1428fc48b7a6SYuval Mintz 	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
1429fc48b7a6SYuval Mintz 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1430fc48b7a6SYuval Mintz 		__set_bit(QED_DEV_CAP_ETH,
1431fc48b7a6SYuval Mintz 			  &p_hwfn->hw_info.device_capabilities);
1432fc48b7a6SYuval Mintz 
1433fe56b9e6SYuval Mintz 	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1434fe56b9e6SYuval Mintz }
1435fe56b9e6SYuval Mintz 
14361408cc1fSYuval Mintz static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
14371408cc1fSYuval Mintz {
14381408cc1fSYuval Mintz 	u32 reg_function_hide, tmp, eng_mask;
14391408cc1fSYuval Mintz 	u8 num_funcs;
14401408cc1fSYuval Mintz 
14411408cc1fSYuval Mintz 	num_funcs = MAX_NUM_PFS_BB;
14421408cc1fSYuval Mintz 
14431408cc1fSYuval Mintz 	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
14441408cc1fSYuval Mintz 	 * in the other bits are selected.
14451408cc1fSYuval Mintz 	 * Bits 1-15 are for functions 1-15, respectively, and their value is
14461408cc1fSYuval Mintz 	 * '0' only for enabled functions (function 0 always exists and
14471408cc1fSYuval Mintz 	 * is enabled).
14481408cc1fSYuval Mintz 	 * In case of CMT, only the "even" functions are enabled, and thus the
14491408cc1fSYuval Mintz 	 * number of functions for both hwfns is learnt from the same bits.
14501408cc1fSYuval Mintz 	 */
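	/* Illustrative decode (assumed register value): for a hwfn that takes
	 * the engine-0/CMT branch below (eng_mask = 0x5554),
	 * reg_function_hide = 0xffe1 has bit 0 set (bypass values valid) and
	 * bits 1-4 clear (functions 1-4 enabled); masking the inverted value
	 * with 0x5554 leaves bits 2 and 4, so num_funcs = 1 (PF0) + 2 = 3.
	 */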
14511408cc1fSYuval Mintz 	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
14521408cc1fSYuval Mintz 
14531408cc1fSYuval Mintz 	if (reg_function_hide & 0x1) {
14541408cc1fSYuval Mintz 		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
14551408cc1fSYuval Mintz 			num_funcs = 0;
14561408cc1fSYuval Mintz 			eng_mask = 0xaaaa;
14571408cc1fSYuval Mintz 		} else {
14581408cc1fSYuval Mintz 			num_funcs = 1;
14591408cc1fSYuval Mintz 			eng_mask = 0x5554;
14601408cc1fSYuval Mintz 		}
14611408cc1fSYuval Mintz 
14621408cc1fSYuval Mintz 		/* Get the number of the enabled functions on the engine */
14631408cc1fSYuval Mintz 		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
14641408cc1fSYuval Mintz 		while (tmp) {
14651408cc1fSYuval Mintz 			if (tmp & 0x1)
14661408cc1fSYuval Mintz 				num_funcs++;
14671408cc1fSYuval Mintz 			tmp >>= 0x1;
14681408cc1fSYuval Mintz 		}
14691408cc1fSYuval Mintz 	}
14701408cc1fSYuval Mintz 
14711408cc1fSYuval Mintz 	p_hwfn->num_funcs_on_engine = num_funcs;
14721408cc1fSYuval Mintz 
14731408cc1fSYuval Mintz 	DP_VERBOSE(p_hwfn,
14741408cc1fSYuval Mintz 		   NETIF_MSG_PROBE,
14751408cc1fSYuval Mintz 		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
14761408cc1fSYuval Mintz 		   p_hwfn->rel_pf_id,
14771408cc1fSYuval Mintz 		   p_hwfn->abs_pf_id,
14781408cc1fSYuval Mintz 		   p_hwfn->num_funcs_on_engine);
14791408cc1fSYuval Mintz }
14801408cc1fSYuval Mintz 
1481fe56b9e6SYuval Mintz static int
1482fe56b9e6SYuval Mintz qed_get_hw_info(struct qed_hwfn *p_hwfn,
1483fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt,
1484fe56b9e6SYuval Mintz 		enum qed_pci_personality personality)
1485fe56b9e6SYuval Mintz {
1486fe56b9e6SYuval Mintz 	u32 port_mode;
1487fe56b9e6SYuval Mintz 	int rc;
1488fe56b9e6SYuval Mintz 
148932a47e72SYuval Mintz 	/* Since all information is common, only the first hwfn should do this */
149032a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn)) {
149132a47e72SYuval Mintz 		rc = qed_iov_hw_info(p_hwfn);
149232a47e72SYuval Mintz 		if (rc)
149332a47e72SYuval Mintz 			return rc;
149432a47e72SYuval Mintz 	}
149532a47e72SYuval Mintz 
1496fe56b9e6SYuval Mintz 	/* Read the port mode */
1497fe56b9e6SYuval Mintz 	port_mode = qed_rd(p_hwfn, p_ptt,
1498fe56b9e6SYuval Mintz 			   CNIG_REG_NW_PORT_MODE_BB_B0);
1499fe56b9e6SYuval Mintz 
1500fe56b9e6SYuval Mintz 	if (port_mode < 3) {
1501fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1502fe56b9e6SYuval Mintz 	} else if (port_mode <= 5) {
1503fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 2;
1504fe56b9e6SYuval Mintz 	} else {
1505fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1506fe56b9e6SYuval Mintz 			  port_mode);
1507fe56b9e6SYuval Mintz 
1508fe56b9e6SYuval Mintz 		/* Default num_ports_in_engines to something */
1509fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1510fe56b9e6SYuval Mintz 	}
1511fe56b9e6SYuval Mintz 
1512fe56b9e6SYuval Mintz 	qed_hw_get_nvm_info(p_hwfn, p_ptt);
1513fe56b9e6SYuval Mintz 
1514fe56b9e6SYuval Mintz 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1515fe56b9e6SYuval Mintz 	if (rc)
1516fe56b9e6SYuval Mintz 		return rc;
1517fe56b9e6SYuval Mintz 
1518fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn))
1519fe56b9e6SYuval Mintz 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1520fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.mac);
1521fe56b9e6SYuval Mintz 	else
1522fe56b9e6SYuval Mintz 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1523fe56b9e6SYuval Mintz 
1524fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1525fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1526fe56b9e6SYuval Mintz 			p_hwfn->hw_info.ovlan =
1527fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.ovlan;
1528fe56b9e6SYuval Mintz 
1529fe56b9e6SYuval Mintz 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1530fe56b9e6SYuval Mintz 	}
1531fe56b9e6SYuval Mintz 
1532fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1533fe56b9e6SYuval Mintz 		enum qed_pci_personality protocol;
1534fe56b9e6SYuval Mintz 
1535fe56b9e6SYuval Mintz 		protocol = p_hwfn->mcp_info->func_info.protocol;
1536fe56b9e6SYuval Mintz 		p_hwfn->hw_info.personality = protocol;
1537fe56b9e6SYuval Mintz 	}
1538fe56b9e6SYuval Mintz 
15391408cc1fSYuval Mintz 	qed_get_num_funcs(p_hwfn, p_ptt);
15401408cc1fSYuval Mintz 
1541fe56b9e6SYuval Mintz 	qed_hw_get_resc(p_hwfn);
1542fe56b9e6SYuval Mintz 
1543fe56b9e6SYuval Mintz 	return rc;
1544fe56b9e6SYuval Mintz }
1545fe56b9e6SYuval Mintz 
154612e09c69SYuval Mintz static int qed_get_dev_info(struct qed_dev *cdev)
1547fe56b9e6SYuval Mintz {
1548fc48b7a6SYuval Mintz 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1549fe56b9e6SYuval Mintz 	u32 tmp;
1550fe56b9e6SYuval Mintz 
1551fc48b7a6SYuval Mintz 	/* Read Vendor Id / Device Id */
1552fc48b7a6SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1553fc48b7a6SYuval Mintz 			     &cdev->vendor_id);
1554fc48b7a6SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1555fc48b7a6SYuval Mintz 			     &cdev->device_id);
1556fc48b7a6SYuval Mintz 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1557fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_NUM);
1558fc48b7a6SYuval Mintz 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1559fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_REV);
1560fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
1561fe56b9e6SYuval Mintz 
1562fc48b7a6SYuval Mintz 	cdev->type = QED_DEV_TYPE_BB;
1563fe56b9e6SYuval Mintz 	/* Learn number of HW-functions */
1564fc48b7a6SYuval Mintz 	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1565fe56b9e6SYuval Mintz 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
1566fe56b9e6SYuval Mintz 
1567fc48b7a6SYuval Mintz 	if (tmp & (1 << p_hwfn->rel_pf_id)) {
1568fe56b9e6SYuval Mintz 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1569fe56b9e6SYuval Mintz 		cdev->num_hwfns = 2;
1570fe56b9e6SYuval Mintz 	} else {
1571fe56b9e6SYuval Mintz 		cdev->num_hwfns = 1;
1572fe56b9e6SYuval Mintz 	}
1573fe56b9e6SYuval Mintz 
1574fc48b7a6SYuval Mintz 	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1575fe56b9e6SYuval Mintz 				    MISCS_REG_CHIP_TEST_REG) >> 4;
1576fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1577fc48b7a6SYuval Mintz 	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1578fe56b9e6SYuval Mintz 				       MISCS_REG_CHIP_METAL);
1579fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1580fe56b9e6SYuval Mintz 
1581fe56b9e6SYuval Mintz 	DP_INFO(cdev->hwfns,
1582fe56b9e6SYuval Mintz 		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1583fe56b9e6SYuval Mintz 		cdev->chip_num, cdev->chip_rev,
1584fe56b9e6SYuval Mintz 		cdev->chip_bond_id, cdev->chip_metal);
158512e09c69SYuval Mintz 
158612e09c69SYuval Mintz 	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
158712e09c69SYuval Mintz 		DP_NOTICE(cdev->hwfns,
158812e09c69SYuval Mintz 			  "The chip type/rev (BB A0) is not supported!\n");
158912e09c69SYuval Mintz 		return -EINVAL;
159012e09c69SYuval Mintz 	}
159112e09c69SYuval Mintz 
159212e09c69SYuval Mintz 	return 0;
1593fe56b9e6SYuval Mintz }
1594fe56b9e6SYuval Mintz 
1595fe56b9e6SYuval Mintz static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1596fe56b9e6SYuval Mintz 				 void __iomem *p_regview,
1597fe56b9e6SYuval Mintz 				 void __iomem *p_doorbells,
1598fe56b9e6SYuval Mintz 				 enum qed_pci_personality personality)
1599fe56b9e6SYuval Mintz {
1600fe56b9e6SYuval Mintz 	int rc = 0;
1601fe56b9e6SYuval Mintz 
1602fe56b9e6SYuval Mintz 	/* Split PCI bars evenly between hwfns */
1603fe56b9e6SYuval Mintz 	p_hwfn->regview = p_regview;
1604fe56b9e6SYuval Mintz 	p_hwfn->doorbells = p_doorbells;
1605fe56b9e6SYuval Mintz 
16061408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
16071408cc1fSYuval Mintz 		return qed_vf_hw_prepare(p_hwfn);
16081408cc1fSYuval Mintz 
1609fe56b9e6SYuval Mintz 	/* Validate that chip access is feasible */
1610fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1611fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
1612fe56b9e6SYuval Mintz 		       "Reading the ME register returns all Fs; Preventing further chip access\n");
1613fe56b9e6SYuval Mintz 		return -EINVAL;
1614fe56b9e6SYuval Mintz 	}
1615fe56b9e6SYuval Mintz 
1616fe56b9e6SYuval Mintz 	get_function_id(p_hwfn);
1617fe56b9e6SYuval Mintz 
161812e09c69SYuval Mintz 	/* Allocate PTT pool */
161912e09c69SYuval Mintz 	rc = qed_ptt_pool_alloc(p_hwfn);
1620fe56b9e6SYuval Mintz 	if (rc) {
1621fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1622fe56b9e6SYuval Mintz 		goto err0;
1623fe56b9e6SYuval Mintz 	}
1624fe56b9e6SYuval Mintz 
162512e09c69SYuval Mintz 	/* Allocate the main PTT */
162612e09c69SYuval Mintz 	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
162712e09c69SYuval Mintz 
1628fe56b9e6SYuval Mintz 	/* First hwfn learns basic information, e.g., number of hwfns */
162912e09c69SYuval Mintz 	if (!p_hwfn->my_id) {
163012e09c69SYuval Mintz 		rc = qed_get_dev_info(p_hwfn->cdev);
163112e09c69SYuval Mintz 		if (rc != 0)
163212e09c69SYuval Mintz 			goto err1;
163312e09c69SYuval Mintz 	}
163412e09c69SYuval Mintz 
163512e09c69SYuval Mintz 	qed_hw_hwfn_prepare(p_hwfn);
1636fe56b9e6SYuval Mintz 
1637fe56b9e6SYuval Mintz 	/* Initialize MCP structure */
1638fe56b9e6SYuval Mintz 	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1639fe56b9e6SYuval Mintz 	if (rc) {
1640fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1641fe56b9e6SYuval Mintz 		goto err1;
1642fe56b9e6SYuval Mintz 	}
1643fe56b9e6SYuval Mintz 
1644fe56b9e6SYuval Mintz 	/* Read the device configuration information from the HW and SHMEM */
1645fe56b9e6SYuval Mintz 	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1646fe56b9e6SYuval Mintz 	if (rc) {
1647fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1648fe56b9e6SYuval Mintz 		goto err2;
1649fe56b9e6SYuval Mintz 	}
1650fe56b9e6SYuval Mintz 
1651fe56b9e6SYuval Mintz 	/* Allocate the init RT array and initialize the init-ops engine */
1652fe56b9e6SYuval Mintz 	rc = qed_init_alloc(p_hwfn);
1653fe56b9e6SYuval Mintz 	if (rc) {
1654fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1655fe56b9e6SYuval Mintz 		goto err2;
1656fe56b9e6SYuval Mintz 	}
1657fe56b9e6SYuval Mintz 
1658fe56b9e6SYuval Mintz 	return rc;
1659fe56b9e6SYuval Mintz err2:
166032a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn))
166132a47e72SYuval Mintz 		qed_iov_free_hw_info(p_hwfn->cdev);
1662fe56b9e6SYuval Mintz 	qed_mcp_free(p_hwfn);
1663fe56b9e6SYuval Mintz err1:
1664fe56b9e6SYuval Mintz 	qed_hw_hwfn_free(p_hwfn);
1665fe56b9e6SYuval Mintz err0:
1666fe56b9e6SYuval Mintz 	return rc;
1667fe56b9e6SYuval Mintz }
1668fe56b9e6SYuval Mintz 
1669fe56b9e6SYuval Mintz int qed_hw_prepare(struct qed_dev *cdev,
1670fe56b9e6SYuval Mintz 		   int personality)
1671fe56b9e6SYuval Mintz {
1672c78df14eSAriel Elior 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1673c78df14eSAriel Elior 	int rc;
1674fe56b9e6SYuval Mintz 
1675fe56b9e6SYuval Mintz 	/* Store the precompiled init data ptrs */
16761408cc1fSYuval Mintz 	if (IS_PF(cdev))
1677fe56b9e6SYuval Mintz 		qed_init_iro_array(cdev);
1678fe56b9e6SYuval Mintz 
1679fe56b9e6SYuval Mintz 	/* Initialize the first hwfn - will learn number of hwfns */
1680c78df14eSAriel Elior 	rc = qed_hw_prepare_single(p_hwfn,
1681c78df14eSAriel Elior 				   cdev->regview,
1682fe56b9e6SYuval Mintz 				   cdev->doorbells, personality);
1683fe56b9e6SYuval Mintz 	if (rc)
1684fe56b9e6SYuval Mintz 		return rc;
1685fe56b9e6SYuval Mintz 
1686c78df14eSAriel Elior 	personality = p_hwfn->hw_info.personality;
1687fe56b9e6SYuval Mintz 
1688fe56b9e6SYuval Mintz 	/* Initialize the rest of the hwfns */
1689c78df14eSAriel Elior 	if (cdev->num_hwfns > 1) {
1690fe56b9e6SYuval Mintz 		void __iomem *p_regview, *p_doorbell;
1691c78df14eSAriel Elior 		u8 __iomem *addr;
1692fe56b9e6SYuval Mintz 
1693c78df14eSAriel Elior 		/* adjust bar offset for second engine */
1694c2035eeaSRam Amrani 		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
1695c78df14eSAriel Elior 		p_regview = addr;
1696c78df14eSAriel Elior 
1697c78df14eSAriel Elior 		/* adjust doorbell bar offset for second engine */
1698c2035eeaSRam Amrani 		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
1699c78df14eSAriel Elior 		p_doorbell = addr;
1700c78df14eSAriel Elior 
1701c78df14eSAriel Elior 		/* prepare second hw function */
1702c78df14eSAriel Elior 		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1703fe56b9e6SYuval Mintz 					   p_doorbell, personality);
1704c78df14eSAriel Elior 
1705c78df14eSAriel Elior 		/* in case of error, need to free the previously
1706c78df14eSAriel Elior 		 * initialized hwfn 0.
1707c78df14eSAriel Elior 		 */
1708fe56b9e6SYuval Mintz 		if (rc) {
17091408cc1fSYuval Mintz 			if (IS_PF(cdev)) {
1710c78df14eSAriel Elior 				qed_init_free(p_hwfn);
1711c78df14eSAriel Elior 				qed_mcp_free(p_hwfn);
1712c78df14eSAriel Elior 				qed_hw_hwfn_free(p_hwfn);
1713fe56b9e6SYuval Mintz 			}
1714fe56b9e6SYuval Mintz 		}
17151408cc1fSYuval Mintz 	}
1716fe56b9e6SYuval Mintz 
1717c78df14eSAriel Elior 	return rc;
1718fe56b9e6SYuval Mintz }
1719fe56b9e6SYuval Mintz 
1720fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev)
1721fe56b9e6SYuval Mintz {
1722fe56b9e6SYuval Mintz 	int i;
1723fe56b9e6SYuval Mintz 
1724fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1725fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1726fe56b9e6SYuval Mintz 
17271408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
17280b55e27dSYuval Mintz 			qed_vf_pf_release(p_hwfn);
17291408cc1fSYuval Mintz 			continue;
17301408cc1fSYuval Mintz 		}
17311408cc1fSYuval Mintz 
1732fe56b9e6SYuval Mintz 		qed_init_free(p_hwfn);
1733fe56b9e6SYuval Mintz 		qed_hw_hwfn_free(p_hwfn);
1734fe56b9e6SYuval Mintz 		qed_mcp_free(p_hwfn);
1735fe56b9e6SYuval Mintz 	}
173632a47e72SYuval Mintz 
173732a47e72SYuval Mintz 	qed_iov_free_hw_info(cdev);
1738fe56b9e6SYuval Mintz }
1739fe56b9e6SYuval Mintz 
1740fe56b9e6SYuval Mintz int qed_chain_alloc(struct qed_dev *cdev,
1741fe56b9e6SYuval Mintz 		    enum qed_chain_use_mode intended_use,
1742fe56b9e6SYuval Mintz 		    enum qed_chain_mode mode,
1743fe56b9e6SYuval Mintz 		    u16 num_elems,
1744fe56b9e6SYuval Mintz 		    size_t elem_size,
1745fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1746fe56b9e6SYuval Mintz {
1747fe56b9e6SYuval Mintz 	dma_addr_t p_pbl_phys = 0;
1748fe56b9e6SYuval Mintz 	void *p_pbl_virt = NULL;
1749fe56b9e6SYuval Mintz 	dma_addr_t p_phys = 0;
1750fe56b9e6SYuval Mintz 	void *p_virt = NULL;
1751fe56b9e6SYuval Mintz 	u16 page_cnt = 0;
1752fe56b9e6SYuval Mintz 	size_t size;
1753fe56b9e6SYuval Mintz 
1754fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_SINGLE)
1755fe56b9e6SYuval Mintz 		page_cnt = 1;
1756fe56b9e6SYuval Mintz 	else
1757fe56b9e6SYuval Mintz 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1758fe56b9e6SYuval Mintz 
1759fe56b9e6SYuval Mintz 	size = page_cnt * QED_CHAIN_PAGE_SIZE;
1760fe56b9e6SYuval Mintz 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1761fe56b9e6SYuval Mintz 				    size, &p_phys, GFP_KERNEL);
1762fe56b9e6SYuval Mintz 	if (!p_virt) {
1763fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1764fe56b9e6SYuval Mintz 		goto nomem;
1765fe56b9e6SYuval Mintz 	}
1766fe56b9e6SYuval Mintz 
1767fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_PBL) {
1768fe56b9e6SYuval Mintz 		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1769fe56b9e6SYuval Mintz 		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1770fe56b9e6SYuval Mintz 						size, &p_pbl_phys,
1771fe56b9e6SYuval Mintz 						GFP_KERNEL);
1772fe56b9e6SYuval Mintz 		if (!p_pbl_virt) {
1773fe56b9e6SYuval Mintz 			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1774fe56b9e6SYuval Mintz 			goto nomem;
1775fe56b9e6SYuval Mintz 		}
1776fe56b9e6SYuval Mintz 
1777fe56b9e6SYuval Mintz 		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1778fe56b9e6SYuval Mintz 				   (u8)elem_size, intended_use,
1779fe56b9e6SYuval Mintz 				   p_pbl_phys, p_pbl_virt);
1780fe56b9e6SYuval Mintz 	} else {
1781fe56b9e6SYuval Mintz 		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1782fe56b9e6SYuval Mintz 			       (u8)elem_size, intended_use, mode);
1783fe56b9e6SYuval Mintz 	}
1784fe56b9e6SYuval Mintz 
1785fe56b9e6SYuval Mintz 	return 0;
1786fe56b9e6SYuval Mintz 
1787fe56b9e6SYuval Mintz nomem:
1788fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
1789fe56b9e6SYuval Mintz 			  page_cnt * QED_CHAIN_PAGE_SIZE,
1790fe56b9e6SYuval Mintz 			  p_virt, p_phys);
1791fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
1792fe56b9e6SYuval Mintz 			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1793fe56b9e6SYuval Mintz 			  p_pbl_virt, p_pbl_phys);
1794fe56b9e6SYuval Mintz 
1795fe56b9e6SYuval Mintz 	return -ENOMEM;
1796fe56b9e6SYuval Mintz }
1797fe56b9e6SYuval Mintz 
1798fe56b9e6SYuval Mintz void qed_chain_free(struct qed_dev *cdev,
1799fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1800fe56b9e6SYuval Mintz {
1801fe56b9e6SYuval Mintz 	size_t size;
1802fe56b9e6SYuval Mintz 
1803fe56b9e6SYuval Mintz 	if (!p_chain->p_virt_addr)
1804fe56b9e6SYuval Mintz 		return;
1805fe56b9e6SYuval Mintz 
1806fe56b9e6SYuval Mintz 	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1807fe56b9e6SYuval Mintz 		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1808fe56b9e6SYuval Mintz 		dma_free_coherent(&cdev->pdev->dev, size,
1809fe56b9e6SYuval Mintz 				  p_chain->pbl.p_virt_table,
1810fe56b9e6SYuval Mintz 				  p_chain->pbl.p_phys_table);
1811fe56b9e6SYuval Mintz 	}
1812fe56b9e6SYuval Mintz 
1813fe56b9e6SYuval Mintz 	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1814fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev, size,
1815fe56b9e6SYuval Mintz 			  p_chain->p_virt_addr,
1816fe56b9e6SYuval Mintz 			  p_chain->p_phys_addr);
1817fe56b9e6SYuval Mintz }
1818cee4d264SManish Chopra 
1819cee4d264SManish Chopra int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1820cee4d264SManish Chopra 		    u16 src_id, u16 *dst_id)
1821cee4d264SManish Chopra {
1822cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1823cee4d264SManish Chopra 		u16 min, max;
1824cee4d264SManish Chopra 
1825cee4d264SManish Chopra 		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1826cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1827cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1828cee4d264SManish Chopra 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1829cee4d264SManish Chopra 			  src_id, min, max);
1830cee4d264SManish Chopra 
1831cee4d264SManish Chopra 		return -EINVAL;
1832cee4d264SManish Chopra 	}
1833cee4d264SManish Chopra 
1834cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1835cee4d264SManish Chopra 
1836cee4d264SManish Chopra 	return 0;
1837cee4d264SManish Chopra }
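
/* Example mapping (assumed resource layout): with RESC_START(QED_L2_QUEUE) = 16
 * and RESC_NUM(QED_L2_QUEUE) = 8, qed_fw_l2_queue() translates src_id 3 into
 * dst_id 19, while src_id 8 is rejected with -EINVAL.
 */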
1838cee4d264SManish Chopra 
1839cee4d264SManish Chopra int qed_fw_vport(struct qed_hwfn *p_hwfn,
1840cee4d264SManish Chopra 		 u8 src_id, u8 *dst_id)
1841cee4d264SManish Chopra {
1842cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1843cee4d264SManish Chopra 		u8 min, max;
1844cee4d264SManish Chopra 
1845cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
1846cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
1847cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1848cee4d264SManish Chopra 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
1849cee4d264SManish Chopra 			  src_id, min, max);
1850cee4d264SManish Chopra 
1851cee4d264SManish Chopra 		return -EINVAL;
1852cee4d264SManish Chopra 	}
1853cee4d264SManish Chopra 
1854cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1855cee4d264SManish Chopra 
1856cee4d264SManish Chopra 	return 0;
1857cee4d264SManish Chopra }
1858cee4d264SManish Chopra 
1859cee4d264SManish Chopra int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1860cee4d264SManish Chopra 		   u8 src_id, u8 *dst_id)
1861cee4d264SManish Chopra {
1862cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1863cee4d264SManish Chopra 		u8 min, max;
1864cee4d264SManish Chopra 
1865cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1866cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1867cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1868cee4d264SManish Chopra 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1869cee4d264SManish Chopra 			  src_id, min, max);
1870cee4d264SManish Chopra 
1871cee4d264SManish Chopra 		return -EINVAL;
1872cee4d264SManish Chopra 	}
1873cee4d264SManish Chopra 
1874cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1875cee4d264SManish Chopra 
1876cee4d264SManish Chopra 	return 0;
1877cee4d264SManish Chopra }
1878bcd197c8SManish Chopra 
1879bcd197c8SManish Chopra /* Calculate final WFQ values for all vports and configure them.
1880bcd197c8SManish Chopra  * After this configuration each vport will have
1881bcd197c8SManish Chopra  * approx min rate =  min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
1882bcd197c8SManish Chopra  */
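/* Worked example (assumed rates): with min_pf_rate = 10000 Mbps and a vport
 * whose wfq_data min_speed is 2500 Mbps, the loop below programs
 * vport_wfq = (2500 * QED_WFQ_UNIT) / 10000 = QED_WFQ_UNIT / 4, i.e. the vport
 * is guaranteed roughly 10000 * (1 / 4) = 2500 Mbps.
 */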
1883bcd197c8SManish Chopra static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1884bcd197c8SManish Chopra 					     struct qed_ptt *p_ptt,
1885bcd197c8SManish Chopra 					     u32 min_pf_rate)
1886bcd197c8SManish Chopra {
1887bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
1888bcd197c8SManish Chopra 	int i;
1889bcd197c8SManish Chopra 
1890bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
1891bcd197c8SManish Chopra 
1892bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1893bcd197c8SManish Chopra 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1894bcd197c8SManish Chopra 
1895bcd197c8SManish Chopra 		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
1896bcd197c8SManish Chopra 						min_pf_rate;
1897bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
1898bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
1899bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
1900bcd197c8SManish Chopra 	}
1901bcd197c8SManish Chopra }
1902bcd197c8SManish Chopra 
1903bcd197c8SManish Chopra static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
1904bcd197c8SManish Chopra 				       u32 min_pf_rate)
1906bcd197c8SManish Chopra {
1907bcd197c8SManish Chopra 	int i;
1908bcd197c8SManish Chopra 
1909bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
1910bcd197c8SManish Chopra 		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
1911bcd197c8SManish Chopra }
1912bcd197c8SManish Chopra 
1913bcd197c8SManish Chopra static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1914bcd197c8SManish Chopra 					   struct qed_ptt *p_ptt,
1915bcd197c8SManish Chopra 					   u32 min_pf_rate)
1916bcd197c8SManish Chopra {
1917bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
1918bcd197c8SManish Chopra 	int i;
1919bcd197c8SManish Chopra 
1920bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
1921bcd197c8SManish Chopra 
1922bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1923bcd197c8SManish Chopra 		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
1924bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
1925bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
1926bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
1927bcd197c8SManish Chopra 	}
1928bcd197c8SManish Chopra }
1929bcd197c8SManish Chopra 
1930bcd197c8SManish Chopra /* This function performs several validations for WFQ
1931bcd197c8SManish Chopra  * configuration and required min rate for a given vport
1932bcd197c8SManish Chopra  * 1. req_rate must be at least one percent of min_pf_rate.
1933bcd197c8SManish Chopra  * 2. req_rate must not cause the rates of other vports (those not explicitly
1934bcd197c8SManish Chopra  *    configured for WFQ) to drop below one percent of min_pf_rate.
1935bcd197c8SManish Chopra  * 3. total_req_min_rate (the sum of all vport min rates) must not exceed min_pf_rate.
1936bcd197c8SManish Chopra  */
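/* Illustrative failure of check 2 (assumed numbers): with min_pf_rate =
 * 10000 Mbps and four vports, requesting 9800 Mbps for one vport would leave
 * (10000 - 9800) / 3 = 66 Mbps for each of the three unconfigured vports,
 * below the one-percent floor (100 Mbps) described above, so the request is
 * rejected.
 */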
1937bcd197c8SManish Chopra static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
1938bcd197c8SManish Chopra 			      u16 vport_id, u32 req_rate,
1939bcd197c8SManish Chopra 			      u32 min_pf_rate)
1940bcd197c8SManish Chopra {
1941bcd197c8SManish Chopra 	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
1942bcd197c8SManish Chopra 	int non_requested_count = 0, req_count = 0, i, num_vports;
1943bcd197c8SManish Chopra 
1944bcd197c8SManish Chopra 	num_vports = p_hwfn->qm_info.num_vports;
1945bcd197c8SManish Chopra 
1946bcd197c8SManish Chopra 	/* Accounting for the vports which are configured for WFQ explicitly */
1947bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
1948bcd197c8SManish Chopra 		u32 tmp_speed;
1949bcd197c8SManish Chopra 
1950bcd197c8SManish Chopra 		if ((i != vport_id) &&
1951bcd197c8SManish Chopra 		    p_hwfn->qm_info.wfq_data[i].configured) {
1952bcd197c8SManish Chopra 			req_count++;
1953bcd197c8SManish Chopra 			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1954bcd197c8SManish Chopra 			total_req_min_rate += tmp_speed;
1955bcd197c8SManish Chopra 		}
1956bcd197c8SManish Chopra 	}
1957bcd197c8SManish Chopra 
1958bcd197c8SManish Chopra 	/* Include current vport data as well */
1959bcd197c8SManish Chopra 	req_count++;
1960bcd197c8SManish Chopra 	total_req_min_rate += req_rate;
1961bcd197c8SManish Chopra 	non_requested_count = num_vports - req_count;
1962bcd197c8SManish Chopra 
1963bcd197c8SManish Chopra 	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
1964bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1965bcd197c8SManish Chopra 			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1966bcd197c8SManish Chopra 			   vport_id, req_rate, min_pf_rate);
1967bcd197c8SManish Chopra 		return -EINVAL;
1968bcd197c8SManish Chopra 	}
1969bcd197c8SManish Chopra 
1970bcd197c8SManish Chopra 	if (num_vports > QED_WFQ_UNIT) {
1971bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1972bcd197c8SManish Chopra 			   "Number of vports is greater than %d\n",
1973bcd197c8SManish Chopra 			   QED_WFQ_UNIT);
1974bcd197c8SManish Chopra 		return -EINVAL;
1975bcd197c8SManish Chopra 	}
1976bcd197c8SManish Chopra 
1977bcd197c8SManish Chopra 	if (total_req_min_rate > min_pf_rate) {
1978bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1979bcd197c8SManish Chopra 			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
1980bcd197c8SManish Chopra 			   total_req_min_rate, min_pf_rate);
1981bcd197c8SManish Chopra 		return -EINVAL;
1982bcd197c8SManish Chopra 	}
1983bcd197c8SManish Chopra 
1984bcd197c8SManish Chopra 	total_left_rate = min_pf_rate - total_req_min_rate;
1985bcd197c8SManish Chopra 
1986bcd197c8SManish Chopra 	left_rate_per_vp = total_left_rate / non_requested_count;
1987bcd197c8SManish Chopra 	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
1988bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1989bcd197c8SManish Chopra 			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1990bcd197c8SManish Chopra 			   left_rate_per_vp, min_pf_rate);
1991bcd197c8SManish Chopra 		return -EINVAL;
1992bcd197c8SManish Chopra 	}
1993bcd197c8SManish Chopra 
1994bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
1995bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
1996bcd197c8SManish Chopra 
1997bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
1998bcd197c8SManish Chopra 		if (p_hwfn->qm_info.wfq_data[i].configured)
1999bcd197c8SManish Chopra 			continue;
2000bcd197c8SManish Chopra 
2001bcd197c8SManish Chopra 		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
2002bcd197c8SManish Chopra 	}
2003bcd197c8SManish Chopra 
2004bcd197c8SManish Chopra 	return 0;
2005bcd197c8SManish Chopra }
2006bcd197c8SManish Chopra 
2007733def6aSYuval Mintz static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
2008733def6aSYuval Mintz 				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
2009733def6aSYuval Mintz {
2010733def6aSYuval Mintz 	struct qed_mcp_link_state *p_link;
2011733def6aSYuval Mintz 	int rc = 0;
2012733def6aSYuval Mintz 
2013733def6aSYuval Mintz 	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
2014733def6aSYuval Mintz 
2015733def6aSYuval Mintz 	if (!p_link->min_pf_rate) {
2016733def6aSYuval Mintz 		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
2017733def6aSYuval Mintz 		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
2018733def6aSYuval Mintz 		return rc;
2019733def6aSYuval Mintz 	}
2020733def6aSYuval Mintz 
2021733def6aSYuval Mintz 	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
2022733def6aSYuval Mintz 
2023733def6aSYuval Mintz 	if (rc == 0)
2024733def6aSYuval Mintz 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
2025733def6aSYuval Mintz 						 p_link->min_pf_rate);
2026733def6aSYuval Mintz 	else
2027733def6aSYuval Mintz 		DP_NOTICE(p_hwfn,
2028733def6aSYuval Mintz 			  "Validation failed while configuring min rate\n");
2029733def6aSYuval Mintz 
2030733def6aSYuval Mintz 	return rc;
2031733def6aSYuval Mintz }
2032733def6aSYuval Mintz 
2033bcd197c8SManish Chopra static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
2034bcd197c8SManish Chopra 						 struct qed_ptt *p_ptt,
2035bcd197c8SManish Chopra 						 u32 min_pf_rate)
2036bcd197c8SManish Chopra {
2037bcd197c8SManish Chopra 	bool use_wfq = false;
2038bcd197c8SManish Chopra 	int rc = 0;
2039bcd197c8SManish Chopra 	u16 i;
2040bcd197c8SManish Chopra 
2041bcd197c8SManish Chopra 	/* Validate all pre configured vports for wfq */
2042bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
2043bcd197c8SManish Chopra 		u32 rate;
2044bcd197c8SManish Chopra 
2045bcd197c8SManish Chopra 		if (!p_hwfn->qm_info.wfq_data[i].configured)
2046bcd197c8SManish Chopra 			continue;
2047bcd197c8SManish Chopra 
2048bcd197c8SManish Chopra 		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
2049bcd197c8SManish Chopra 		use_wfq = true;
2050bcd197c8SManish Chopra 
2051bcd197c8SManish Chopra 		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
2052bcd197c8SManish Chopra 		if (rc) {
2053bcd197c8SManish Chopra 			DP_NOTICE(p_hwfn,
2054bcd197c8SManish Chopra 				  "WFQ validation failed while configuring min rate\n");
2055bcd197c8SManish Chopra 			break;
2056bcd197c8SManish Chopra 		}
2057bcd197c8SManish Chopra 	}
2058bcd197c8SManish Chopra 
2059bcd197c8SManish Chopra 	if (!rc && use_wfq)
2060bcd197c8SManish Chopra 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
2061bcd197c8SManish Chopra 	else
2062bcd197c8SManish Chopra 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
2063bcd197c8SManish Chopra 
2064bcd197c8SManish Chopra 	return rc;
2065bcd197c8SManish Chopra }
2066bcd197c8SManish Chopra 
2067733def6aSYuval Mintz /* Main API for qed clients to configure vport min rate.
2068733def6aSYuval Mintz  * vp_id - vport id within the PF, range [0 .. (total_num_vports_per_pf - 1)]
2069733def6aSYuval Mintz  * rate - speed in Mbps to be assigned to the given vport.
2070733def6aSYuval Mintz  */
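/* Usage sketch (illustrative, assuming a 10 Gbps PF min rate is configured):
 * qed_configure_vport_wfq(cdev, 2, 2500) requests a ~2.5 Gbps guaranteed
 * minimum for vport 2; the request is validated and, on success, the WFQ
 * weights of all vports on the hwfn are re-programmed.
 */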
2071733def6aSYuval Mintz int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
2072733def6aSYuval Mintz {
2073733def6aSYuval Mintz 	int i, rc = -EINVAL;
2074733def6aSYuval Mintz 
2075733def6aSYuval Mintz 	/* Currently not supported; might change in the future */
2076733def6aSYuval Mintz 	if (cdev->num_hwfns > 1) {
2077733def6aSYuval Mintz 		DP_NOTICE(cdev,
2078733def6aSYuval Mintz 			  "WFQ configuration is not supported for this device\n");
2079733def6aSYuval Mintz 		return rc;
2080733def6aSYuval Mintz 	}
2081733def6aSYuval Mintz 
2082733def6aSYuval Mintz 	for_each_hwfn(cdev, i) {
2083733def6aSYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2084733def6aSYuval Mintz 		struct qed_ptt *p_ptt;
2085733def6aSYuval Mintz 
2086733def6aSYuval Mintz 		p_ptt = qed_ptt_acquire(p_hwfn);
2087733def6aSYuval Mintz 		if (!p_ptt)
2088733def6aSYuval Mintz 			return -EBUSY;
2089733def6aSYuval Mintz 
2090733def6aSYuval Mintz 		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
2091733def6aSYuval Mintz 
2092733def6aSYuval Mintz 		if (!rc) {
2093733def6aSYuval Mintz 			qed_ptt_release(p_hwfn, p_ptt);
2094733def6aSYuval Mintz 			return rc;
2095733def6aSYuval Mintz 		}
2096733def6aSYuval Mintz 
2097733def6aSYuval Mintz 		qed_ptt_release(p_hwfn, p_ptt);
2098733def6aSYuval Mintz 	}
2099733def6aSYuval Mintz 
2100733def6aSYuval Mintz 	return rc;
2101733def6aSYuval Mintz }
2102733def6aSYuval Mintz 
2103bcd197c8SManish Chopra /* API to configure WFQ from mcp link change */
2104bcd197c8SManish Chopra void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
2105bcd197c8SManish Chopra {
2106bcd197c8SManish Chopra 	int i;
2107bcd197c8SManish Chopra 
21083e7cfce2SYuval Mintz 	if (cdev->num_hwfns > 1) {
21093e7cfce2SYuval Mintz 		DP_VERBOSE(cdev,
21103e7cfce2SYuval Mintz 			   NETIF_MSG_LINK,
21113e7cfce2SYuval Mintz 			   "WFQ configuration is not supported for this device\n");
21123e7cfce2SYuval Mintz 		return;
21133e7cfce2SYuval Mintz 	}
21143e7cfce2SYuval Mintz 
2115bcd197c8SManish Chopra 	for_each_hwfn(cdev, i) {
2116bcd197c8SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2117bcd197c8SManish Chopra 
2118bcd197c8SManish Chopra 		__qed_configure_vp_wfq_on_link_change(p_hwfn,
2119bcd197c8SManish Chopra 						      p_hwfn->p_dpc_ptt,
2120bcd197c8SManish Chopra 						      min_pf_rate);
2121bcd197c8SManish Chopra 	}
2122bcd197c8SManish Chopra }
21234b01e519SManish Chopra 
21244b01e519SManish Chopra int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
21254b01e519SManish Chopra 				     struct qed_ptt *p_ptt,
21264b01e519SManish Chopra 				     struct qed_mcp_link_state *p_link,
21274b01e519SManish Chopra 				     u8 max_bw)
21284b01e519SManish Chopra {
21294b01e519SManish Chopra 	int rc = 0;
21304b01e519SManish Chopra 
21314b01e519SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
21324b01e519SManish Chopra 
21334b01e519SManish Chopra 	if (!p_link->line_speed && (max_bw != 100))
21344b01e519SManish Chopra 		return rc;
21354b01e519SManish Chopra 
21364b01e519SManish Chopra 	p_link->speed = (p_link->line_speed * max_bw) / 100;
21374b01e519SManish Chopra 	p_hwfn->qm_info.pf_rl = p_link->speed;
21384b01e519SManish Chopra 
21394b01e519SManish Chopra 	/* Since the limiter also affects Tx-switched traffic, we don't want it
21404b01e519SManish Chopra 	 * to limit such traffic in case there's no actual limit.
21414b01e519SManish Chopra 	 * In that case, set the limit to an arbitrarily high value.
21424b01e519SManish Chopra 	 */
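	/* Example (assumed link state): with line_speed = 25000 Mbps,
	 * max_bw = 40 gives p_link->speed = pf_rl = 10000 Mbps, while
	 * max_bw = 100 forces pf_rl to the 100000 Mbps "no limit" value below.
	 */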
21434b01e519SManish Chopra 	if (max_bw == 100)
21444b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
21454b01e519SManish Chopra 
21464b01e519SManish Chopra 	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
21474b01e519SManish Chopra 			    p_hwfn->qm_info.pf_rl);
21484b01e519SManish Chopra 
21494b01e519SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
21504b01e519SManish Chopra 		   "Configured MAX bandwidth to be %08x Mb/sec\n",
21514b01e519SManish Chopra 		   p_link->speed);
21524b01e519SManish Chopra 
21534b01e519SManish Chopra 	return rc;
21544b01e519SManish Chopra }
21554b01e519SManish Chopra 
21564b01e519SManish Chopra /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
21574b01e519SManish Chopra int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
21584b01e519SManish Chopra {
21594b01e519SManish Chopra 	int i, rc = -EINVAL;
21604b01e519SManish Chopra 
21614b01e519SManish Chopra 	if (max_bw < 1 || max_bw > 100) {
21624b01e519SManish Chopra 		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
21634b01e519SManish Chopra 		return rc;
21644b01e519SManish Chopra 	}
21654b01e519SManish Chopra 
21664b01e519SManish Chopra 	for_each_hwfn(cdev, i) {
21674b01e519SManish Chopra 		struct qed_hwfn	*p_hwfn = &cdev->hwfns[i];
21684b01e519SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
21694b01e519SManish Chopra 		struct qed_mcp_link_state *p_link;
21704b01e519SManish Chopra 		struct qed_ptt *p_ptt;
21714b01e519SManish Chopra 
21724b01e519SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
21734b01e519SManish Chopra 
21744b01e519SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
21754b01e519SManish Chopra 		if (!p_ptt)
21764b01e519SManish Chopra 			return -EBUSY;
21774b01e519SManish Chopra 
21784b01e519SManish Chopra 		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
21794b01e519SManish Chopra 						      p_link, max_bw);
21804b01e519SManish Chopra 
21814b01e519SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
21824b01e519SManish Chopra 
21834b01e519SManish Chopra 		if (rc)
21844b01e519SManish Chopra 			break;
21854b01e519SManish Chopra 	}
21864b01e519SManish Chopra 
21874b01e519SManish Chopra 	return rc;
21884b01e519SManish Chopra }
2189a64b02d5SManish Chopra 
2190a64b02d5SManish Chopra int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
2191a64b02d5SManish Chopra 				     struct qed_ptt *p_ptt,
2192a64b02d5SManish Chopra 				     struct qed_mcp_link_state *p_link,
2193a64b02d5SManish Chopra 				     u8 min_bw)
2194a64b02d5SManish Chopra {
2195a64b02d5SManish Chopra 	int rc = 0;
2196a64b02d5SManish Chopra 
2197a64b02d5SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
2198a64b02d5SManish Chopra 	p_hwfn->qm_info.pf_wfq = min_bw;
2199a64b02d5SManish Chopra 
2200a64b02d5SManish Chopra 	if (!p_link->line_speed)
2201a64b02d5SManish Chopra 		return rc;
2202a64b02d5SManish Chopra 
2203a64b02d5SManish Chopra 	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
2204a64b02d5SManish Chopra 
2205a64b02d5SManish Chopra 	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
2206a64b02d5SManish Chopra 
2207a64b02d5SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
2208a64b02d5SManish Chopra 		   "Configured MIN bandwidth to be %d Mb/sec\n",
2209a64b02d5SManish Chopra 		   p_link->min_pf_rate);
2210a64b02d5SManish Chopra 
2211a64b02d5SManish Chopra 	return rc;
2212a64b02d5SManish Chopra }
2213a64b02d5SManish Chopra 
2214a64b02d5SManish Chopra /* Main API to configure PF min bandwidth where bw range is [1-100] */
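/* Usage sketch (illustrative): qed_configure_pf_min_bandwidth(cdev, 20)
 * guarantees the PF roughly 20% of line speed and, when a PF min rate results,
 * re-runs the per-vport WFQ validation against the new min_pf_rate.
 */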
2215a64b02d5SManish Chopra int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
2216a64b02d5SManish Chopra {
2217a64b02d5SManish Chopra 	int i, rc = -EINVAL;
2218a64b02d5SManish Chopra 
2219a64b02d5SManish Chopra 	if (min_bw < 1 || min_bw > 100) {
2220a64b02d5SManish Chopra 		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
2221a64b02d5SManish Chopra 		return rc;
2222a64b02d5SManish Chopra 	}
2223a64b02d5SManish Chopra 
2224a64b02d5SManish Chopra 	for_each_hwfn(cdev, i) {
2225a64b02d5SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2226a64b02d5SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
2227a64b02d5SManish Chopra 		struct qed_mcp_link_state *p_link;
2228a64b02d5SManish Chopra 		struct qed_ptt *p_ptt;
2229a64b02d5SManish Chopra 
2230a64b02d5SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
2231a64b02d5SManish Chopra 
2232a64b02d5SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
2233a64b02d5SManish Chopra 		if (!p_ptt)
2234a64b02d5SManish Chopra 			return -EBUSY;
2235a64b02d5SManish Chopra 
2236a64b02d5SManish Chopra 		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
2237a64b02d5SManish Chopra 						      p_link, min_bw);
2238a64b02d5SManish Chopra 		if (rc) {
2239a64b02d5SManish Chopra 			qed_ptt_release(p_hwfn, p_ptt);
2240a64b02d5SManish Chopra 			return rc;
2241a64b02d5SManish Chopra 		}
2242a64b02d5SManish Chopra 
2243a64b02d5SManish Chopra 		if (p_link->min_pf_rate) {
2244a64b02d5SManish Chopra 			u32 min_rate = p_link->min_pf_rate;
2245a64b02d5SManish Chopra 
2246a64b02d5SManish Chopra 			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
2247a64b02d5SManish Chopra 								   p_ptt,
2248a64b02d5SManish Chopra 								   min_rate);
2249a64b02d5SManish Chopra 		}
2250a64b02d5SManish Chopra 
2251a64b02d5SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
2252a64b02d5SManish Chopra 	}
2253a64b02d5SManish Chopra 
2254a64b02d5SManish Chopra 	return rc;
2255a64b02d5SManish Chopra }
2256733def6aSYuval Mintz 
2257733def6aSYuval Mintz void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2258733def6aSYuval Mintz {
2259733def6aSYuval Mintz 	struct qed_mcp_link_state *p_link;
2260733def6aSYuval Mintz 
2261733def6aSYuval Mintz 	p_link = &p_hwfn->mcp_info->link_output;
2262733def6aSYuval Mintz 
2263733def6aSYuval Mintz 	if (p_link->min_pf_rate)
2264733def6aSYuval Mintz 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
2265733def6aSYuval Mintz 					       p_link->min_pf_rate);
2266733def6aSYuval Mintz 
2267733def6aSYuval Mintz 	memset(p_hwfn->qm_info.wfq_data, 0,
2268733def6aSYuval Mintz 	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
2269733def6aSYuval Mintz }
2270