1fe56b9e6SYuval Mintz /* QLogic qed NIC Driver
2fe56b9e6SYuval Mintz  * Copyright (c) 2015 QLogic Corporation
3fe56b9e6SYuval Mintz  *
4fe56b9e6SYuval Mintz  * This software is available under the terms of the GNU General Public License
5fe56b9e6SYuval Mintz  * (GPL) Version 2, available from the file COPYING in the main directory of
6fe56b9e6SYuval Mintz  * this source tree.
7fe56b9e6SYuval Mintz  */
8fe56b9e6SYuval Mintz 
9fe56b9e6SYuval Mintz #include <linux/types.h>
10fe56b9e6SYuval Mintz #include <asm/byteorder.h>
11fe56b9e6SYuval Mintz #include <linux/io.h>
12fe56b9e6SYuval Mintz #include <linux/delay.h>
13fe56b9e6SYuval Mintz #include <linux/dma-mapping.h>
14fe56b9e6SYuval Mintz #include <linux/errno.h>
15fe56b9e6SYuval Mintz #include <linux/kernel.h>
16fe56b9e6SYuval Mintz #include <linux/mutex.h>
17fe56b9e6SYuval Mintz #include <linux/pci.h>
18fe56b9e6SYuval Mintz #include <linux/slab.h>
19fe56b9e6SYuval Mintz #include <linux/string.h>
20fe56b9e6SYuval Mintz #include <linux/etherdevice.h>
21fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h>
22fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h>
23fe56b9e6SYuval Mintz #include "qed.h"
24fe56b9e6SYuval Mintz #include "qed_cxt.h"
25fe56b9e6SYuval Mintz #include "qed_dev_api.h"
26fe56b9e6SYuval Mintz #include "qed_hsi.h"
27fe56b9e6SYuval Mintz #include "qed_hw.h"
28fe56b9e6SYuval Mintz #include "qed_init_ops.h"
29fe56b9e6SYuval Mintz #include "qed_int.h"
30fe56b9e6SYuval Mintz #include "qed_mcp.h"
31fe56b9e6SYuval Mintz #include "qed_reg_addr.h"
32fe56b9e6SYuval Mintz #include "qed_sp.h"
3332a47e72SYuval Mintz #include "qed_sriov.h"
34fe56b9e6SYuval Mintz 
35fe56b9e6SYuval Mintz /* API common to all protocols */
/* Identifies which PCI BAR a size query refers to */
enum BAR_ID {
	BAR_ID_0,       /* Used for GRC */
	BAR_ID_1        /* Used for doorbells */
};
40c2035eeaSRam Amrani 
41c2035eeaSRam Amrani static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
42c2035eeaSRam Amrani 			   enum BAR_ID		bar_id)
43c2035eeaSRam Amrani {
44c2035eeaSRam Amrani 	u32	bar_reg = (bar_id == BAR_ID_0 ?
45c2035eeaSRam Amrani 			   PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
46c2035eeaSRam Amrani 	u32	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
47c2035eeaSRam Amrani 
48c2035eeaSRam Amrani 	if (val)
49c2035eeaSRam Amrani 		return 1 << (val + 15);
50c2035eeaSRam Amrani 
51c2035eeaSRam Amrani 	/* Old MFW initialized above registered only conditionally */
52c2035eeaSRam Amrani 	if (p_hwfn->cdev->num_hwfns > 1) {
53c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
54c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
55c2035eeaSRam Amrani 			return BAR_ID_0 ? 256 * 1024 : 512 * 1024;
56c2035eeaSRam Amrani 	} else {
57c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
58c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
59c2035eeaSRam Amrani 			return 512 * 1024;
60c2035eeaSRam Amrani 	}
61c2035eeaSRam Amrani }
62c2035eeaSRam Amrani 
63fe56b9e6SYuval Mintz void qed_init_dp(struct qed_dev *cdev,
64fe56b9e6SYuval Mintz 		 u32 dp_module, u8 dp_level)
65fe56b9e6SYuval Mintz {
66fe56b9e6SYuval Mintz 	u32 i;
67fe56b9e6SYuval Mintz 
68fe56b9e6SYuval Mintz 	cdev->dp_level = dp_level;
69fe56b9e6SYuval Mintz 	cdev->dp_module = dp_module;
70fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
71fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
72fe56b9e6SYuval Mintz 
73fe56b9e6SYuval Mintz 		p_hwfn->dp_level = dp_level;
74fe56b9e6SYuval Mintz 		p_hwfn->dp_module = dp_module;
75fe56b9e6SYuval Mintz 	}
76fe56b9e6SYuval Mintz }
77fe56b9e6SYuval Mintz 
78fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev)
79fe56b9e6SYuval Mintz {
80fe56b9e6SYuval Mintz 	u8 i;
81fe56b9e6SYuval Mintz 
82fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
83fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
84fe56b9e6SYuval Mintz 
85fe56b9e6SYuval Mintz 		p_hwfn->cdev = cdev;
86fe56b9e6SYuval Mintz 		p_hwfn->my_id = i;
87fe56b9e6SYuval Mintz 		p_hwfn->b_active = false;
88fe56b9e6SYuval Mintz 
89fe56b9e6SYuval Mintz 		mutex_init(&p_hwfn->dmae_info.mutex);
90fe56b9e6SYuval Mintz 	}
91fe56b9e6SYuval Mintz 
92fe56b9e6SYuval Mintz 	/* hwfn 0 is always active */
93fe56b9e6SYuval Mintz 	cdev->hwfns[0].b_active = true;
94fe56b9e6SYuval Mintz 
95fe56b9e6SYuval Mintz 	/* set the default cache alignment to 128 */
96fe56b9e6SYuval Mintz 	cdev->cache_shift = 7;
97fe56b9e6SYuval Mintz }
98fe56b9e6SYuval Mintz 
99fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
100fe56b9e6SYuval Mintz {
101fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
102fe56b9e6SYuval Mintz 
103fe56b9e6SYuval Mintz 	kfree(qm_info->qm_pq_params);
104fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = NULL;
105fe56b9e6SYuval Mintz 	kfree(qm_info->qm_vport_params);
106fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = NULL;
107fe56b9e6SYuval Mintz 	kfree(qm_info->qm_port_params);
108fe56b9e6SYuval Mintz 	qm_info->qm_port_params = NULL;
109bcd197c8SManish Chopra 	kfree(qm_info->wfq_data);
110bcd197c8SManish Chopra 	qm_info->wfq_data = NULL;
111fe56b9e6SYuval Mintz }
112fe56b9e6SYuval Mintz 
113fe56b9e6SYuval Mintz void qed_resc_free(struct qed_dev *cdev)
114fe56b9e6SYuval Mintz {
115fe56b9e6SYuval Mintz 	int i;
116fe56b9e6SYuval Mintz 
117fe56b9e6SYuval Mintz 	kfree(cdev->fw_data);
118fe56b9e6SYuval Mintz 	cdev->fw_data = NULL;
119fe56b9e6SYuval Mintz 
120fe56b9e6SYuval Mintz 	kfree(cdev->reset_stats);
121fe56b9e6SYuval Mintz 
122fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
123fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
124fe56b9e6SYuval Mintz 
12525c089d7SYuval Mintz 		kfree(p_hwfn->p_tx_cids);
12625c089d7SYuval Mintz 		p_hwfn->p_tx_cids = NULL;
12725c089d7SYuval Mintz 		kfree(p_hwfn->p_rx_cids);
12825c089d7SYuval Mintz 		p_hwfn->p_rx_cids = NULL;
12925c089d7SYuval Mintz 	}
13025c089d7SYuval Mintz 
13125c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
13225c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
13325c089d7SYuval Mintz 
134fe56b9e6SYuval Mintz 		qed_cxt_mngr_free(p_hwfn);
135fe56b9e6SYuval Mintz 		qed_qm_info_free(p_hwfn);
136fe56b9e6SYuval Mintz 		qed_spq_free(p_hwfn);
137fe56b9e6SYuval Mintz 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
138fe56b9e6SYuval Mintz 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
139fe56b9e6SYuval Mintz 		qed_int_free(p_hwfn);
14032a47e72SYuval Mintz 		qed_iov_free(p_hwfn);
141fe56b9e6SYuval Mintz 		qed_dmae_info_free(p_hwfn);
142fe56b9e6SYuval Mintz 	}
143fe56b9e6SYuval Mintz }
144fe56b9e6SYuval Mintz 
145fe56b9e6SYuval Mintz static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
146fe56b9e6SYuval Mintz {
147fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
148fe56b9e6SYuval Mintz 	struct init_qm_port_params *p_qm_port;
149fe56b9e6SYuval Mintz 	u8 num_vports, i, vport_id, num_ports;
150fe56b9e6SYuval Mintz 	u16 num_pqs, multi_cos_tcs = 1;
151fe56b9e6SYuval Mintz 
152fe56b9e6SYuval Mintz 	memset(qm_info, 0, sizeof(*qm_info));
153fe56b9e6SYuval Mintz 
154fe56b9e6SYuval Mintz 	num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
155fe56b9e6SYuval Mintz 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
156fe56b9e6SYuval Mintz 
157fe56b9e6SYuval Mintz 	/* Sanity checking that setup requires legal number of resources */
158fe56b9e6SYuval Mintz 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
159fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
160fe56b9e6SYuval Mintz 		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
161fe56b9e6SYuval Mintz 		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
162fe56b9e6SYuval Mintz 		return -EINVAL;
163fe56b9e6SYuval Mintz 	}
164fe56b9e6SYuval Mintz 
165fe56b9e6SYuval Mintz 	/* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
166fe56b9e6SYuval Mintz 	 */
167fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
16860fffb3bSYuval Mintz 					num_pqs, GFP_KERNEL);
169fe56b9e6SYuval Mintz 	if (!qm_info->qm_pq_params)
170fe56b9e6SYuval Mintz 		goto alloc_err;
171fe56b9e6SYuval Mintz 
172fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
17360fffb3bSYuval Mintz 					   num_vports, GFP_KERNEL);
174fe56b9e6SYuval Mintz 	if (!qm_info->qm_vport_params)
175fe56b9e6SYuval Mintz 		goto alloc_err;
176fe56b9e6SYuval Mintz 
177fe56b9e6SYuval Mintz 	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
17860fffb3bSYuval Mintz 					  MAX_NUM_PORTS, GFP_KERNEL);
179fe56b9e6SYuval Mintz 	if (!qm_info->qm_port_params)
180fe56b9e6SYuval Mintz 		goto alloc_err;
181fe56b9e6SYuval Mintz 
182bcd197c8SManish Chopra 	qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
183bcd197c8SManish Chopra 				    GFP_KERNEL);
184bcd197c8SManish Chopra 	if (!qm_info->wfq_data)
185bcd197c8SManish Chopra 		goto alloc_err;
186bcd197c8SManish Chopra 
187fe56b9e6SYuval Mintz 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
188fe56b9e6SYuval Mintz 
189fe56b9e6SYuval Mintz 	/* First init per-TC PQs */
190fe56b9e6SYuval Mintz 	for (i = 0; i < multi_cos_tcs; i++) {
191fe56b9e6SYuval Mintz 		struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
192fe56b9e6SYuval Mintz 
193fe56b9e6SYuval Mintz 		params->vport_id = vport_id;
194fe56b9e6SYuval Mintz 		params->tc_id = p_hwfn->hw_info.non_offload_tc;
195fe56b9e6SYuval Mintz 		params->wrr_group = 1;
196fe56b9e6SYuval Mintz 	}
197fe56b9e6SYuval Mintz 
198fe56b9e6SYuval Mintz 	/* Then init pure-LB PQ */
199fe56b9e6SYuval Mintz 	qm_info->pure_lb_pq = i;
200fe56b9e6SYuval Mintz 	qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
201fe56b9e6SYuval Mintz 	qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
202fe56b9e6SYuval Mintz 	qm_info->qm_pq_params[i].wrr_group = 1;
203fe56b9e6SYuval Mintz 	i++;
204fe56b9e6SYuval Mintz 
205fe56b9e6SYuval Mintz 	qm_info->offload_pq = 0;
206fe56b9e6SYuval Mintz 	qm_info->num_pqs = num_pqs;
207fe56b9e6SYuval Mintz 	qm_info->num_vports = num_vports;
208fe56b9e6SYuval Mintz 
209fe56b9e6SYuval Mintz 	/* Initialize qm port parameters */
210fe56b9e6SYuval Mintz 	num_ports = p_hwfn->cdev->num_ports_in_engines;
211fe56b9e6SYuval Mintz 	for (i = 0; i < num_ports; i++) {
212fe56b9e6SYuval Mintz 		p_qm_port = &qm_info->qm_port_params[i];
213fe56b9e6SYuval Mintz 		p_qm_port->active = 1;
214fe56b9e6SYuval Mintz 		p_qm_port->num_active_phys_tcs = 4;
215fe56b9e6SYuval Mintz 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
216fe56b9e6SYuval Mintz 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
217fe56b9e6SYuval Mintz 	}
218fe56b9e6SYuval Mintz 
219fe56b9e6SYuval Mintz 	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
220fe56b9e6SYuval Mintz 
221fe56b9e6SYuval Mintz 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
222fe56b9e6SYuval Mintz 
223fe56b9e6SYuval Mintz 	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
224fe56b9e6SYuval Mintz 
225a64b02d5SManish Chopra 	for (i = 0; i < qm_info->num_vports; i++)
226a64b02d5SManish Chopra 		qm_info->qm_vport_params[i].vport_wfq = 1;
227a64b02d5SManish Chopra 
228fe56b9e6SYuval Mintz 	qm_info->pf_wfq = 0;
229fe56b9e6SYuval Mintz 	qm_info->pf_rl = 0;
230fe56b9e6SYuval Mintz 	qm_info->vport_rl_en = 1;
231a64b02d5SManish Chopra 	qm_info->vport_wfq_en = 1;
232fe56b9e6SYuval Mintz 
233fe56b9e6SYuval Mintz 	return 0;
234fe56b9e6SYuval Mintz 
235fe56b9e6SYuval Mintz alloc_err:
236fe56b9e6SYuval Mintz 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
237bcd197c8SManish Chopra 	qed_qm_info_free(p_hwfn);
238fe56b9e6SYuval Mintz 	return -ENOMEM;
239fe56b9e6SYuval Mintz }
240fe56b9e6SYuval Mintz 
/* Allocate all software resources of the device: firmware data, per-hwfn
 * queue->CID maps, context manager, QM configuration, ILT, SPQ, interrupt
 * infrastructure, SR-IOV data, EQ, ConsQ, DMAE info and reset statistics.
 *
 * Ordering within the per-hwfn loop matters: CID/TID counts must be set
 * before the ILT partition is computed, and the SPQ depends on the ILT.
 *
 * Returns 0 on success or a negative errno; on any failure every
 * already-allocated resource is released through qed_resc_free().
 */
int qed_resc_alloc(struct qed_dev *cdev)
{
	struct qed_consq *p_consq;
	struct qed_eq *p_eq;
	int i, rc = 0;

	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
	if (!cdev->fw_data)
		return -ENOMEM;

	/* Allocate Memory for the Queue->CID mapping */
	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
		int tx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
		int rx_size = sizeof(struct qed_hw_cid_data) *
				     RESC_NUM(p_hwfn, QED_L2_QUEUE);

		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
		if (!p_hwfn->p_tx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Tx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}

		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
		if (!p_hwfn->p_rx_cids) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for Rx Cids\n");
			rc = -ENOMEM;
			goto alloc_err;
		}
	}

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		/* First allocate the context manager structure */
		rc = qed_cxt_mngr_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Set the HW cid/tid numbers (in the context manager)
		 * Must be done prior to any further computations.
		 */
		rc = qed_cxt_set_pf_params(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Prepare and process QM requirements */
		rc = qed_init_qm_info(p_hwfn);
		if (rc)
			goto alloc_err;

		/* Compute the ILT client partition */
		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
		if (rc)
			goto alloc_err;

		/* CID map / ILT shadow table / T2
		 * The tables sizes are determined by the computations above
		 */
		rc = qed_cxt_tables_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SPQ, must follow ILT because initializes SPQ context */
		rc = qed_spq_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* SP status block allocation */
		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
							 RESERVED_PTT_DPC);

		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto alloc_err;

		rc = qed_iov_alloc(p_hwfn);
		if (rc)
			goto alloc_err;

		/* EQ - event queue, 256 elements */
		p_eq = qed_eq_alloc(p_hwfn, 256);
		if (!p_eq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_eq = p_eq;

		/* ConsQ - consolidation queue */
		p_consq = qed_consq_alloc(p_hwfn);
		if (!p_consq) {
			rc = -ENOMEM;
			goto alloc_err;
		}
		p_hwfn->p_consq = p_consq;

		/* DMA info initialization */
		rc = qed_dmae_info_alloc(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate memory for dmae_info structure\n");
			goto alloc_err;
		}
	}

	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
	if (!cdev->reset_stats) {
		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
		rc = -ENOMEM;
		goto alloc_err;
	}

	return 0;

alloc_err:
	/* qed_resc_free() tolerates partially-allocated state */
	qed_resc_free(cdev);
	return rc;
}
362fe56b9e6SYuval Mintz 
/* Perform the software setup pass over the resources allocated by
 * qed_resc_alloc(): context manager, SPQ, EQ, ConsQ, interrupts and
 * SR-IOV, plus an initial snapshot of the MFW mailbox.
 */
void qed_resc_setup(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		qed_cxt_mngr_setup(p_hwfn);
		qed_spq_setup(p_hwfn);
		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
		qed_consq_setup(p_hwfn, p_hwfn->p_consq);

		/* Read shadow of current MFW mailbox */
		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
		       p_hwfn->mcp_info->mfw_mb_cur,
		       p_hwfn->mcp_info->mfw_mb_length);

		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);

		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
	}
}
386fe56b9e6SYuval Mintz 
387fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_CNT          (100)
388fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_TIME         (10)
389fe56b9e6SYuval Mintz int qed_final_cleanup(struct qed_hwfn *p_hwfn,
390fe56b9e6SYuval Mintz 		      struct qed_ptt *p_ptt,
391fe56b9e6SYuval Mintz 		      u16 id)
392fe56b9e6SYuval Mintz {
393fe56b9e6SYuval Mintz 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
394fe56b9e6SYuval Mintz 	int rc = -EBUSY;
395fe56b9e6SYuval Mintz 
396fc48b7a6SYuval Mintz 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
397fc48b7a6SYuval Mintz 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
398fe56b9e6SYuval Mintz 
399fc48b7a6SYuval Mintz 	command |= X_FINAL_CLEANUP_AGG_INT <<
400fc48b7a6SYuval Mintz 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
401fc48b7a6SYuval Mintz 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
402fc48b7a6SYuval Mintz 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
403fc48b7a6SYuval Mintz 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
404fe56b9e6SYuval Mintz 
405fe56b9e6SYuval Mintz 	/* Make sure notification is not set before initiating final cleanup */
406fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr)) {
407fe56b9e6SYuval Mintz 		DP_NOTICE(
408fe56b9e6SYuval Mintz 			p_hwfn,
409fe56b9e6SYuval Mintz 			"Unexpected; Found final cleanup notification before initiating final cleanup\n");
410fe56b9e6SYuval Mintz 		REG_WR(p_hwfn, addr, 0);
411fe56b9e6SYuval Mintz 	}
412fe56b9e6SYuval Mintz 
413fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
414fe56b9e6SYuval Mintz 		   "Sending final cleanup for PFVF[%d] [Command %08x\n]",
415fe56b9e6SYuval Mintz 		   id, command);
416fe56b9e6SYuval Mintz 
417fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
418fe56b9e6SYuval Mintz 
419fe56b9e6SYuval Mintz 	/* Poll until completion */
420fe56b9e6SYuval Mintz 	while (!REG_RD(p_hwfn, addr) && count--)
421fe56b9e6SYuval Mintz 		msleep(FINAL_CLEANUP_POLL_TIME);
422fe56b9e6SYuval Mintz 
423fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr))
424fe56b9e6SYuval Mintz 		rc = 0;
425fe56b9e6SYuval Mintz 	else
426fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
427fe56b9e6SYuval Mintz 			  "Failed to receive FW final cleanup notification\n");
428fe56b9e6SYuval Mintz 
429fe56b9e6SYuval Mintz 	/* Cleanup afterwards */
430fe56b9e6SYuval Mintz 	REG_WR(p_hwfn, addr, 0);
431fe56b9e6SYuval Mintz 
432fe56b9e6SYuval Mintz 	return rc;
433fe56b9e6SYuval Mintz }
434fe56b9e6SYuval Mintz 
435fe56b9e6SYuval Mintz static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
436fe56b9e6SYuval Mintz {
437fe56b9e6SYuval Mintz 	int hw_mode = 0;
438fe56b9e6SYuval Mintz 
43912e09c69SYuval Mintz 	hw_mode = (1 << MODE_BB_B0);
440fe56b9e6SYuval Mintz 
441fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->num_ports_in_engines) {
442fe56b9e6SYuval Mintz 	case 1:
443fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
444fe56b9e6SYuval Mintz 		break;
445fe56b9e6SYuval Mintz 	case 2:
446fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
447fe56b9e6SYuval Mintz 		break;
448fe56b9e6SYuval Mintz 	case 4:
449fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
450fe56b9e6SYuval Mintz 		break;
451fe56b9e6SYuval Mintz 	default:
452fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
453fe56b9e6SYuval Mintz 			  p_hwfn->cdev->num_ports_in_engines);
454fe56b9e6SYuval Mintz 		return;
455fe56b9e6SYuval Mintz 	}
456fe56b9e6SYuval Mintz 
457fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->mf_mode) {
458fc48b7a6SYuval Mintz 	case QED_MF_DEFAULT:
459fc48b7a6SYuval Mintz 	case QED_MF_NPAR:
460fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
461fe56b9e6SYuval Mintz 		break;
462fc48b7a6SYuval Mintz 	case QED_MF_OVLAN:
463fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SD;
464fc48b7a6SYuval Mintz 		break;
465fe56b9e6SYuval Mintz 	default:
466fc48b7a6SYuval Mintz 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
467fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
468fe56b9e6SYuval Mintz 	}
469fe56b9e6SYuval Mintz 
470fe56b9e6SYuval Mintz 	hw_mode |= 1 << MODE_ASIC;
471fe56b9e6SYuval Mintz 
472fe56b9e6SYuval Mintz 	p_hwfn->hw_info.hw_mode = hw_mode;
473fe56b9e6SYuval Mintz }
474fe56b9e6SYuval Mintz 
475fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. */
476fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev)
477fe56b9e6SYuval Mintz {
478fe56b9e6SYuval Mintz 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
479fe56b9e6SYuval Mintz 	int i, sb_id;
480fe56b9e6SYuval Mintz 
481fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
482fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
483fe56b9e6SYuval Mintz 		struct qed_igu_info *p_igu_info;
484fe56b9e6SYuval Mintz 		struct qed_igu_block *p_block;
485fe56b9e6SYuval Mintz 		struct cau_sb_entry sb_entry;
486fe56b9e6SYuval Mintz 
487fe56b9e6SYuval Mintz 		p_igu_info = p_hwfn->hw_info.p_igu_info;
488fe56b9e6SYuval Mintz 
489fe56b9e6SYuval Mintz 		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
490fe56b9e6SYuval Mintz 		     sb_id++) {
491fe56b9e6SYuval Mintz 			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
492fe56b9e6SYuval Mintz 			if (!p_block->is_pf)
493fe56b9e6SYuval Mintz 				continue;
494fe56b9e6SYuval Mintz 
495fe56b9e6SYuval Mintz 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
496fe56b9e6SYuval Mintz 					      p_block->function_id,
497fe56b9e6SYuval Mintz 					      0, 0);
498fe56b9e6SYuval Mintz 			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
499fe56b9e6SYuval Mintz 					 sb_entry);
500fe56b9e6SYuval Mintz 		}
501fe56b9e6SYuval Mintz 	}
502fe56b9e6SYuval Mintz }
503fe56b9e6SYuval Mintz 
/* Engine-wide (common) hardware init phase.
 *
 * Programs CAU runtime data and GTT windows, derives rate-limit/WFQ
 * enables from MFW bandwidth info, runs the common QM runtime init and
 * the ENGINE init phase, then applies post-init register fixups.
 *
 * Returns 0 on success or the error from qed_init_run().
 */
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	/* Enable PF rate-limiting / WFQ only if MFW advertises bandwidth
	 * limits for this function.
	 */
	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	/* Pretend to be the sibling port to close its gates as well */
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc != 0)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	/* Disable relaxed ordering in the PCI config space */
	qed_wr(p_hwfn, p_ptt, 0x20b4,
	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

	return rc;
}
563fe56b9e6SYuval Mintz 
564fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
565fe56b9e6SYuval Mintz 			    struct qed_ptt *p_ptt,
566fe56b9e6SYuval Mintz 			    int hw_mode)
567fe56b9e6SYuval Mintz {
568fe56b9e6SYuval Mintz 	int rc = 0;
569fe56b9e6SYuval Mintz 
570fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
571fe56b9e6SYuval Mintz 			  hw_mode);
572fe56b9e6SYuval Mintz 	return rc;
573fe56b9e6SYuval Mintz }
574fe56b9e6SYuval Mintz 
/* Per-PF hardware init phase.
 *
 * Applies MFW bandwidth configuration, runs context/IGU runtime init,
 * configures NIG classification according to the MF mode bits in
 * @hw_mode, performs final cleanup of any previous driver instance, then
 * runs the PF and QM_PF init phases.  When @b_hw_start is set, interrupts
 * are enabled in @int_mode and the function-start ramrod is sent (with
 * tunneling config @p_tunn).
 *
 * Returns 0 on success or the first failing step's error code.
 */
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  struct qed_tunn_start_params *p_tunn,
			  int hw_mode,
			  bool b_hw_start,
			  enum qed_int_mode int_mode,
			  bool allow_npar_tx_switch)
{
	u8 rel_pf_id = p_hwfn->rel_pf_id;
	int rc = 0;

	if (p_hwfn->mcp_info) {
		struct qed_mcp_function_info *p_info;

		p_info = &p_hwfn->mcp_info->func_info;
		if (p_info->bandwidth_min)
			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;

		/* Update rate limit once we'll actually have a link */
		p_hwfn->qm_info.pf_rl = 100000;
	}

	qed_cxt_hw_init_pf(p_hwfn);

	qed_int_igu_init_rt(p_hwfn);

	/* Set VLAN in NIG if needed (MF switch-dependent mode) */
	if (hw_mode & (1 << MODE_MF_SD)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
			     p_hwfn->hw_info.ovlan);
	}

	/* Enable classification by MAC if needed (MF switch-independent) */
	if (hw_mode & (1 << MODE_MF_SI)) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "Configuring TAGMAC_CLS_TYPE\n");
		STORE_RT_REG(p_hwfn,
			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration - TCP/FCoE/RoCE searches disabled here */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);

	/* Cleanup chip from previous driver if such remains exist */
	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
	if (rc != 0)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
	if (rc)
		return rc;

	/* Pure runtime initializations - directly to the HW  */
	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);

	if (b_hw_start) {
		/* enable interrupts */
		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);

		/* send function start command */
		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
		if (rc)
			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
	}
	return rc;
}
651fe56b9e6SYuval Mintz 
652fe56b9e6SYuval Mintz static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
653fe56b9e6SYuval Mintz 			       struct qed_ptt *p_ptt,
654fe56b9e6SYuval Mintz 			       u8 enable)
655fe56b9e6SYuval Mintz {
656fe56b9e6SYuval Mintz 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
657fe56b9e6SYuval Mintz 
658fe56b9e6SYuval Mintz 	/* Change PF in PXP */
659fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt,
660fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
661fe56b9e6SYuval Mintz 
662fe56b9e6SYuval Mintz 	/* wait until value is set - try for 1 second every 50us */
663fe56b9e6SYuval Mintz 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
664fe56b9e6SYuval Mintz 		val = qed_rd(p_hwfn, p_ptt,
665fe56b9e6SYuval Mintz 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
666fe56b9e6SYuval Mintz 		if (val == set_val)
667fe56b9e6SYuval Mintz 			break;
668fe56b9e6SYuval Mintz 
669fe56b9e6SYuval Mintz 		usleep_range(50, 60);
670fe56b9e6SYuval Mintz 	}
671fe56b9e6SYuval Mintz 
672fe56b9e6SYuval Mintz 	if (val != set_val) {
673fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
674fe56b9e6SYuval Mintz 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
675fe56b9e6SYuval Mintz 		return -EAGAIN;
676fe56b9e6SYuval Mintz 	}
677fe56b9e6SYuval Mintz 
678fe56b9e6SYuval Mintz 	return 0;
679fe56b9e6SYuval Mintz }
680fe56b9e6SYuval Mintz 
681fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
682fe56b9e6SYuval Mintz 				struct qed_ptt *p_main_ptt)
683fe56b9e6SYuval Mintz {
684fe56b9e6SYuval Mintz 	/* Read shadow of current MFW mailbox */
685fe56b9e6SYuval Mintz 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
686fe56b9e6SYuval Mintz 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
687fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_cur,
688fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_length);
689fe56b9e6SYuval Mintz }
690fe56b9e6SYuval Mintz 
691fe56b9e6SYuval Mintz int qed_hw_init(struct qed_dev *cdev,
692464f6645SManish Chopra 		struct qed_tunn_start_params *p_tunn,
693fe56b9e6SYuval Mintz 		bool b_hw_start,
694fe56b9e6SYuval Mintz 		enum qed_int_mode int_mode,
695fe56b9e6SYuval Mintz 		bool allow_npar_tx_switch,
696fe56b9e6SYuval Mintz 		const u8 *bin_fw_data)
697fe56b9e6SYuval Mintz {
69886622ee7SYuval Mintz 	u32 load_code, param;
699fe56b9e6SYuval Mintz 	int rc, mfw_rc, i;
700fe56b9e6SYuval Mintz 
701fe56b9e6SYuval Mintz 	rc = qed_init_fw_data(cdev, bin_fw_data);
702fe56b9e6SYuval Mintz 	if (rc != 0)
703fe56b9e6SYuval Mintz 		return rc;
704fe56b9e6SYuval Mintz 
705fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
706fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
707fe56b9e6SYuval Mintz 
708fe56b9e6SYuval Mintz 		/* Enable DMAE in PXP */
709fe56b9e6SYuval Mintz 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
710fe56b9e6SYuval Mintz 
711fe56b9e6SYuval Mintz 		qed_calc_hw_mode(p_hwfn);
712fe56b9e6SYuval Mintz 
713fe56b9e6SYuval Mintz 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
714fe56b9e6SYuval Mintz 				      &load_code);
715fe56b9e6SYuval Mintz 		if (rc) {
716fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
717fe56b9e6SYuval Mintz 			return rc;
718fe56b9e6SYuval Mintz 		}
719fe56b9e6SYuval Mintz 
720fe56b9e6SYuval Mintz 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
721fe56b9e6SYuval Mintz 
722fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
723fe56b9e6SYuval Mintz 			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
724fe56b9e6SYuval Mintz 			   rc, load_code);
725fe56b9e6SYuval Mintz 
726fe56b9e6SYuval Mintz 		p_hwfn->first_on_engine = (load_code ==
727fe56b9e6SYuval Mintz 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
728fe56b9e6SYuval Mintz 
729fe56b9e6SYuval Mintz 		switch (load_code) {
730fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
731fe56b9e6SYuval Mintz 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
732fe56b9e6SYuval Mintz 						p_hwfn->hw_info.hw_mode);
733fe56b9e6SYuval Mintz 			if (rc)
734fe56b9e6SYuval Mintz 				break;
735fe56b9e6SYuval Mintz 		/* Fall into */
736fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_PORT:
737fe56b9e6SYuval Mintz 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
738fe56b9e6SYuval Mintz 					      p_hwfn->hw_info.hw_mode);
739fe56b9e6SYuval Mintz 			if (rc)
740fe56b9e6SYuval Mintz 				break;
741fe56b9e6SYuval Mintz 
742fe56b9e6SYuval Mintz 		/* Fall into */
743fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
744fe56b9e6SYuval Mintz 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
745464f6645SManish Chopra 					    p_tunn, p_hwfn->hw_info.hw_mode,
746fe56b9e6SYuval Mintz 					    b_hw_start, int_mode,
747fe56b9e6SYuval Mintz 					    allow_npar_tx_switch);
748fe56b9e6SYuval Mintz 			break;
749fe56b9e6SYuval Mintz 		default:
750fe56b9e6SYuval Mintz 			rc = -EINVAL;
751fe56b9e6SYuval Mintz 			break;
752fe56b9e6SYuval Mintz 		}
753fe56b9e6SYuval Mintz 
754fe56b9e6SYuval Mintz 		if (rc)
755fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
756fe56b9e6SYuval Mintz 				  "init phase failed for loadcode 0x%x (rc %d)\n",
757fe56b9e6SYuval Mintz 				   load_code, rc);
758fe56b9e6SYuval Mintz 
759fe56b9e6SYuval Mintz 		/* ACK mfw regardless of success or failure of initialization */
760fe56b9e6SYuval Mintz 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
761fe56b9e6SYuval Mintz 				     DRV_MSG_CODE_LOAD_DONE,
762fe56b9e6SYuval Mintz 				     0, &load_code, &param);
763fe56b9e6SYuval Mintz 		if (rc)
764fe56b9e6SYuval Mintz 			return rc;
765fe56b9e6SYuval Mintz 		if (mfw_rc) {
766fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
767fe56b9e6SYuval Mintz 			return mfw_rc;
768fe56b9e6SYuval Mintz 		}
769fe56b9e6SYuval Mintz 
770fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = true;
771fe56b9e6SYuval Mintz 	}
772fe56b9e6SYuval Mintz 
773fe56b9e6SYuval Mintz 	return 0;
774fe56b9e6SYuval Mintz }
775fe56b9e6SYuval Mintz 
776fe56b9e6SYuval Mintz #define QED_HW_STOP_RETRY_LIMIT (10)
7778c925c44SYuval Mintz static inline void qed_hw_timers_stop(struct qed_dev *cdev,
7788c925c44SYuval Mintz 				      struct qed_hwfn *p_hwfn,
7798c925c44SYuval Mintz 				      struct qed_ptt *p_ptt)
7808c925c44SYuval Mintz {
7818c925c44SYuval Mintz 	int i;
7828c925c44SYuval Mintz 
7838c925c44SYuval Mintz 	/* close timers */
7848c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
7858c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
7868c925c44SYuval Mintz 
7878c925c44SYuval Mintz 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
7888c925c44SYuval Mintz 		if ((!qed_rd(p_hwfn, p_ptt,
7898c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
7908c925c44SYuval Mintz 		    (!qed_rd(p_hwfn, p_ptt,
7918c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_TASK)))
7928c925c44SYuval Mintz 			break;
7938c925c44SYuval Mintz 
7948c925c44SYuval Mintz 		/* Dependent on number of connection/tasks, possibly
7958c925c44SYuval Mintz 		 * 1ms sleep is required between polls
7968c925c44SYuval Mintz 		 */
7978c925c44SYuval Mintz 		usleep_range(1000, 2000);
7988c925c44SYuval Mintz 	}
7998c925c44SYuval Mintz 
8008c925c44SYuval Mintz 	if (i < QED_HW_STOP_RETRY_LIMIT)
8018c925c44SYuval Mintz 		return;
8028c925c44SYuval Mintz 
8038c925c44SYuval Mintz 	DP_NOTICE(p_hwfn,
8048c925c44SYuval Mintz 		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
8058c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
8068c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
8078c925c44SYuval Mintz }
8088c925c44SYuval Mintz 
8098c925c44SYuval Mintz void qed_hw_timers_stop_all(struct qed_dev *cdev)
8108c925c44SYuval Mintz {
8118c925c44SYuval Mintz 	int j;
8128c925c44SYuval Mintz 
8138c925c44SYuval Mintz 	for_each_hwfn(cdev, j) {
8148c925c44SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
8158c925c44SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
8168c925c44SYuval Mintz 
8178c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
8188c925c44SYuval Mintz 	}
8198c925c44SYuval Mintz }
8208c925c44SYuval Mintz 
/* Stop hw/fw on every hw-function: close the PF against the firmware,
 * gate incoming traffic, disable the parser searches, stop the timer
 * blocks and interrupts, and finally disable DMAE device-wide.
 *
 * Returns 0 on success; if disabling DMAE fails, its error code.
 * NOTE(review): register writes below are an ordered teardown sequence;
 * do not reorder.
 */
int qed_hw_stop(struct qed_dev *cdev)
{
	int rc = 0, t_rc;
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		/* Close the PF against the firmware via ramrod; on failure
		 * we still proceed so the HW is quiesced regardless.
		 */
		rc = qed_sp_pf_stop(p_hwfn);
		if (rc)
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");

		/* Block forwarding of Rx traffic towards this PF (NIG gate) */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		/* Disable all parser protocol searches */
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		/* Stop the timer scans before shutting down interrupts */
		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}

	/* Disable DMAE in PXP - in CMT, this should only be done for
	 * first hw-function, and only after all transactions have
	 * stopped for all active hw-functions.
	 */
	t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
				   cdev->hwfns[0].p_main_ptt,
				   false);
	if (t_rc != 0)
		rc = t_rc;

	return rc;
}
875fe56b9e6SYuval Mintz 
/* Shut down the fastpath on every hw-function without tearing the PF
 * down: gate incoming traffic, stop parser searches and clear status
 * blocks. Counterpart of qed_hw_start_fastpath().
 *
 * NOTE(review): the register writes form an ordered quiesce sequence;
 * do not reorder.
 */
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt   = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		/* Block forwarding of Rx traffic towards this PF (NIG gate) */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		/* Disable all parser protocol searches */
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}
903cee4d264SManish Chopra 
904cee4d264SManish Chopra void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
905cee4d264SManish Chopra {
906cee4d264SManish Chopra 	/* Re-open incoming traffic */
907cee4d264SManish Chopra 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
908cee4d264SManish Chopra 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
909cee4d264SManish Chopra }
910cee4d264SManish Chopra 
911fe56b9e6SYuval Mintz static int qed_reg_assert(struct qed_hwfn *hwfn,
912fe56b9e6SYuval Mintz 			  struct qed_ptt *ptt, u32 reg,
913fe56b9e6SYuval Mintz 			  bool expected)
914fe56b9e6SYuval Mintz {
915fe56b9e6SYuval Mintz 	u32 assert_val = qed_rd(hwfn, ptt, reg);
916fe56b9e6SYuval Mintz 
917fe56b9e6SYuval Mintz 	if (assert_val != expected) {
918fe56b9e6SYuval Mintz 		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
919fe56b9e6SYuval Mintz 			  reg, expected);
920fe56b9e6SYuval Mintz 		return -EINVAL;
921fe56b9e6SYuval Mintz 	}
922fe56b9e6SYuval Mintz 
923fe56b9e6SYuval Mintz 	return 0;
924fe56b9e6SYuval Mintz }
925fe56b9e6SYuval Mintz 
/* Reset hw/fw state on every hw-function: sanity-check the QM usage
 * counters, disable the PF in the HW blocks and run the MFW
 * UNLOAD_REQ/UNLOAD_DONE handshake.
 *
 * Returns 0 on success. A failed UNLOAD_REQ is logged and treated as
 * an engine unload; a failed UNLOAD_DONE aborts with its error code.
 */
int qed_hw_reset(struct qed_dev *cdev)
{
	int rc = 0;
	u32 unload_resp, unload_param;
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");

		/* Check for incorrect states - both QM usage counters are
		 * expected to be zero at this point; failures only log.
		 */
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_TX, 0);
		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
			       QM_REG_USG_CNT_PF_OTHER, 0);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       TCFC_REG_STRONG_ENABLE_PF, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       CCFC_REG_STRONG_ENABLE_PF, 0);

		/* Send unload command to MCP */
		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_REQ,
				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
				 &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
			/* Best effort - pretend the MFW answered with an
			 * engine unload so UNLOAD_DONE can still be sent.
			 */
			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
		}

		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
				 DRV_MSG_CODE_UNLOAD_DONE,
				 0, &unload_resp, &unload_param);
		if (rc) {
			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
			return rc;
		}
	}

	return rc;
}
972fe56b9e6SYuval Mintz 
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	/* Release the PTT pool first, then the IGU info.
	 * kfree() is a no-op on NULL, so this is safe even if
	 * p_igu_info was never allocated.
	 */
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
}
979fe56b9e6SYuval Mintz 
/* Setup bar access: reset PGLUE indirect-access windows, clear stale
 * error indications for this PF and enable internal target reads.
 * NOTE(review): this is an ordered register sequence; do not reorder.
 */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);

	/* Clean previous errors if such exist - the clear register takes
	 * a per-PF bitmask, hence the shift by the absolute PF id.
	 */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
	       1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}
998fe56b9e6SYuval Mintz 
999fe56b9e6SYuval Mintz static void get_function_id(struct qed_hwfn *p_hwfn)
1000fe56b9e6SYuval Mintz {
1001fe56b9e6SYuval Mintz 	/* ME Register */
1002fe56b9e6SYuval Mintz 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
1003fe56b9e6SYuval Mintz 
1004fe56b9e6SYuval Mintz 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1005fe56b9e6SYuval Mintz 
1006fe56b9e6SYuval Mintz 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1007fe56b9e6SYuval Mintz 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1008fe56b9e6SYuval Mintz 				      PXP_CONCRETE_FID_PFID);
1009fe56b9e6SYuval Mintz 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1010fe56b9e6SYuval Mintz 				    PXP_CONCRETE_FID_PORT);
1011fe56b9e6SYuval Mintz }
1012fe56b9e6SYuval Mintz 
101325c089d7SYuval Mintz static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
101425c089d7SYuval Mintz {
101525c089d7SYuval Mintz 	u32 *feat_num = p_hwfn->hw_info.feat_num;
101625c089d7SYuval Mintz 	int num_features = 1;
101725c089d7SYuval Mintz 
101825c089d7SYuval Mintz 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
101925c089d7SYuval Mintz 						num_features,
102025c089d7SYuval Mintz 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
102125c089d7SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
102225c089d7SYuval Mintz 		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
102325c089d7SYuval Mintz 		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
102425c089d7SYuval Mintz 		   num_features);
102525c089d7SYuval Mintz }
102625c089d7SYuval Mintz 
1027fe56b9e6SYuval Mintz static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1028fe56b9e6SYuval Mintz {
1029fe56b9e6SYuval Mintz 	u32 *resc_start = p_hwfn->hw_info.resc_start;
1030fe56b9e6SYuval Mintz 	u32 *resc_num = p_hwfn->hw_info.resc_num;
10314ac801b7SYuval Mintz 	struct qed_sb_cnt_info sb_cnt_info;
1032fe56b9e6SYuval Mintz 	int num_funcs, i;
1033fe56b9e6SYuval Mintz 
1034fc48b7a6SYuval Mintz 	num_funcs = MAX_NUM_PFS_BB;
1035fe56b9e6SYuval Mintz 
10364ac801b7SYuval Mintz 	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
10374ac801b7SYuval Mintz 	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
10384ac801b7SYuval Mintz 
1039fe56b9e6SYuval Mintz 	resc_num[QED_SB] = min_t(u32,
1040fe56b9e6SYuval Mintz 				 (MAX_SB_PER_PATH_BB / num_funcs),
10414ac801b7SYuval Mintz 				 sb_cnt_info.sb_cnt);
104225c089d7SYuval Mintz 	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
1043fe56b9e6SYuval Mintz 	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
104425c089d7SYuval Mintz 	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1045fe56b9e6SYuval Mintz 	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1046fe56b9e6SYuval Mintz 	resc_num[QED_RL] = 8;
104725c089d7SYuval Mintz 	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
104825c089d7SYuval Mintz 	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
104925c089d7SYuval Mintz 			     num_funcs;
1050fe56b9e6SYuval Mintz 	resc_num[QED_ILT] = 950;
1051fe56b9e6SYuval Mintz 
1052fe56b9e6SYuval Mintz 	for (i = 0; i < QED_MAX_RESC; i++)
1053fe56b9e6SYuval Mintz 		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1054fe56b9e6SYuval Mintz 
105525c089d7SYuval Mintz 	qed_hw_set_feat(p_hwfn);
105625c089d7SYuval Mintz 
1057fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1058fe56b9e6SYuval Mintz 		   "The numbers for each resource are:\n"
1059fe56b9e6SYuval Mintz 		   "SB = %d start = %d\n"
106025c089d7SYuval Mintz 		   "L2_QUEUE = %d start = %d\n"
1061fe56b9e6SYuval Mintz 		   "VPORT = %d start = %d\n"
1062fe56b9e6SYuval Mintz 		   "PQ = %d start = %d\n"
1063fe56b9e6SYuval Mintz 		   "RL = %d start = %d\n"
106425c089d7SYuval Mintz 		   "MAC = %d start = %d\n"
106525c089d7SYuval Mintz 		   "VLAN = %d start = %d\n"
1066fe56b9e6SYuval Mintz 		   "ILT = %d start = %d\n",
1067fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_SB],
1068fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_SB],
106925c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
107025c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
1071fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_VPORT],
1072fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_VPORT],
1073fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_PQ],
1074fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_PQ],
1075fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_RL],
1076fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_RL],
107725c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_MAC],
107825c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_MAC],
107925c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_VLAN],
108025c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_VLAN],
1081fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_ILT],
1082fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_ILT]);
1083fe56b9e6SYuval Mintz }
1084fe56b9e6SYuval Mintz 
/* Parse the nvm_cfg image published through the MCP scratchpad: port
 * mode, default link configuration, multi-function mode and device
 * capabilities; finally fill the shmem function info.
 *
 * Returns 0 on success, -EINVAL if the MCP never published the nvm_cfg
 * address, or the return value of qed_mcp_fill_shmem_func_info().
 */
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (Notice this is just the offset, not the offsize (TBD)) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	/* Translate the port-mode field of the global section */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		/* Unrecognized mode is logged but not fatal */
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link->speed.advertised_speeds =
		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
						link->speed.advertised_speeds;

	/* Translate the default link speed / autoneg setting */
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	/* Extract the default flow-control settings from the same word */
	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		offsetof(struct nvm_cfg1, glob) +
		offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
1240fe56b9e6SYuval Mintz 
1241fe56b9e6SYuval Mintz static int
1242fe56b9e6SYuval Mintz qed_get_hw_info(struct qed_hwfn *p_hwfn,
1243fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt,
1244fe56b9e6SYuval Mintz 		enum qed_pci_personality personality)
1245fe56b9e6SYuval Mintz {
1246fe56b9e6SYuval Mintz 	u32 port_mode;
1247fe56b9e6SYuval Mintz 	int rc;
1248fe56b9e6SYuval Mintz 
124932a47e72SYuval Mintz 	/* Since all information is common, only first hwfns should do this */
125032a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn)) {
125132a47e72SYuval Mintz 		rc = qed_iov_hw_info(p_hwfn);
125232a47e72SYuval Mintz 		if (rc)
125332a47e72SYuval Mintz 			return rc;
125432a47e72SYuval Mintz 	}
125532a47e72SYuval Mintz 
1256fe56b9e6SYuval Mintz 	/* Read the port mode */
1257fe56b9e6SYuval Mintz 	port_mode = qed_rd(p_hwfn, p_ptt,
1258fe56b9e6SYuval Mintz 			   CNIG_REG_NW_PORT_MODE_BB_B0);
1259fe56b9e6SYuval Mintz 
1260fe56b9e6SYuval Mintz 	if (port_mode < 3) {
1261fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1262fe56b9e6SYuval Mintz 	} else if (port_mode <= 5) {
1263fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 2;
1264fe56b9e6SYuval Mintz 	} else {
1265fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1266fe56b9e6SYuval Mintz 			  p_hwfn->cdev->num_ports_in_engines);
1267fe56b9e6SYuval Mintz 
1268fe56b9e6SYuval Mintz 		/* Default num_ports_in_engines to something */
1269fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1270fe56b9e6SYuval Mintz 	}
1271fe56b9e6SYuval Mintz 
1272fe56b9e6SYuval Mintz 	qed_hw_get_nvm_info(p_hwfn, p_ptt);
1273fe56b9e6SYuval Mintz 
1274fe56b9e6SYuval Mintz 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1275fe56b9e6SYuval Mintz 	if (rc)
1276fe56b9e6SYuval Mintz 		return rc;
1277fe56b9e6SYuval Mintz 
1278fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn))
1279fe56b9e6SYuval Mintz 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1280fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.mac);
1281fe56b9e6SYuval Mintz 	else
1282fe56b9e6SYuval Mintz 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1283fe56b9e6SYuval Mintz 
1284fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1285fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1286fe56b9e6SYuval Mintz 			p_hwfn->hw_info.ovlan =
1287fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.ovlan;
1288fe56b9e6SYuval Mintz 
1289fe56b9e6SYuval Mintz 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1290fe56b9e6SYuval Mintz 	}
1291fe56b9e6SYuval Mintz 
1292fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1293fe56b9e6SYuval Mintz 		enum qed_pci_personality protocol;
1294fe56b9e6SYuval Mintz 
1295fe56b9e6SYuval Mintz 		protocol = p_hwfn->mcp_info->func_info.protocol;
1296fe56b9e6SYuval Mintz 		p_hwfn->hw_info.personality = protocol;
1297fe56b9e6SYuval Mintz 	}
1298fe56b9e6SYuval Mintz 
1299fe56b9e6SYuval Mintz 	qed_hw_get_resc(p_hwfn);
1300fe56b9e6SYuval Mintz 
1301fe56b9e6SYuval Mintz 	return rc;
1302fe56b9e6SYuval Mintz }
1303fe56b9e6SYuval Mintz 
130412e09c69SYuval Mintz static int qed_get_dev_info(struct qed_dev *cdev)
1305fe56b9e6SYuval Mintz {
1306fc48b7a6SYuval Mintz 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1307fe56b9e6SYuval Mintz 	u32 tmp;
1308fe56b9e6SYuval Mintz 
1309fc48b7a6SYuval Mintz 	/* Read Vendor Id / Device Id */
1310fc48b7a6SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1311fc48b7a6SYuval Mintz 			     &cdev->vendor_id);
1312fc48b7a6SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1313fc48b7a6SYuval Mintz 			     &cdev->device_id);
1314fc48b7a6SYuval Mintz 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1315fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_NUM);
1316fc48b7a6SYuval Mintz 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1317fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_REV);
1318fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
1319fe56b9e6SYuval Mintz 
1320fc48b7a6SYuval Mintz 	cdev->type = QED_DEV_TYPE_BB;
1321fe56b9e6SYuval Mintz 	/* Learn number of HW-functions */
1322fc48b7a6SYuval Mintz 	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1323fe56b9e6SYuval Mintz 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
1324fe56b9e6SYuval Mintz 
1325fc48b7a6SYuval Mintz 	if (tmp & (1 << p_hwfn->rel_pf_id)) {
1326fe56b9e6SYuval Mintz 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1327fe56b9e6SYuval Mintz 		cdev->num_hwfns = 2;
1328fe56b9e6SYuval Mintz 	} else {
1329fe56b9e6SYuval Mintz 		cdev->num_hwfns = 1;
1330fe56b9e6SYuval Mintz 	}
1331fe56b9e6SYuval Mintz 
1332fc48b7a6SYuval Mintz 	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1333fe56b9e6SYuval Mintz 				    MISCS_REG_CHIP_TEST_REG) >> 4;
1334fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1335fc48b7a6SYuval Mintz 	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1336fe56b9e6SYuval Mintz 				       MISCS_REG_CHIP_METAL);
1337fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1338fe56b9e6SYuval Mintz 
1339fe56b9e6SYuval Mintz 	DP_INFO(cdev->hwfns,
1340fe56b9e6SYuval Mintz 		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1341fe56b9e6SYuval Mintz 		cdev->chip_num, cdev->chip_rev,
1342fe56b9e6SYuval Mintz 		cdev->chip_bond_id, cdev->chip_metal);
134312e09c69SYuval Mintz 
134412e09c69SYuval Mintz 	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
134512e09c69SYuval Mintz 		DP_NOTICE(cdev->hwfns,
134612e09c69SYuval Mintz 			  "The chip type/rev (BB A0) is not supported!\n");
134712e09c69SYuval Mintz 		return -EINVAL;
134812e09c69SYuval Mintz 	}
134912e09c69SYuval Mintz 
135012e09c69SYuval Mintz 	return 0;
1351fe56b9e6SYuval Mintz }
1352fe56b9e6SYuval Mintz 
1353fe56b9e6SYuval Mintz static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1354fe56b9e6SYuval Mintz 				 void __iomem *p_regview,
1355fe56b9e6SYuval Mintz 				 void __iomem *p_doorbells,
1356fe56b9e6SYuval Mintz 				 enum qed_pci_personality personality)
1357fe56b9e6SYuval Mintz {
1358fe56b9e6SYuval Mintz 	int rc = 0;
1359fe56b9e6SYuval Mintz 
1360fe56b9e6SYuval Mintz 	/* Split PCI bars evenly between hwfns */
1361fe56b9e6SYuval Mintz 	p_hwfn->regview = p_regview;
1362fe56b9e6SYuval Mintz 	p_hwfn->doorbells = p_doorbells;
1363fe56b9e6SYuval Mintz 
1364fe56b9e6SYuval Mintz 	/* Validate that chip access is feasible */
1365fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1366fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
1367fe56b9e6SYuval Mintz 		       "Reading the ME register returns all Fs; Preventing further chip access\n");
1368fe56b9e6SYuval Mintz 		return -EINVAL;
1369fe56b9e6SYuval Mintz 	}
1370fe56b9e6SYuval Mintz 
1371fe56b9e6SYuval Mintz 	get_function_id(p_hwfn);
1372fe56b9e6SYuval Mintz 
137312e09c69SYuval Mintz 	/* Allocate PTT pool */
137412e09c69SYuval Mintz 	rc = qed_ptt_pool_alloc(p_hwfn);
1375fe56b9e6SYuval Mintz 	if (rc) {
1376fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1377fe56b9e6SYuval Mintz 		goto err0;
1378fe56b9e6SYuval Mintz 	}
1379fe56b9e6SYuval Mintz 
138012e09c69SYuval Mintz 	/* Allocate the main PTT */
138112e09c69SYuval Mintz 	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
138212e09c69SYuval Mintz 
1383fe56b9e6SYuval Mintz 	/* First hwfn learns basic information, e.g., number of hwfns */
138412e09c69SYuval Mintz 	if (!p_hwfn->my_id) {
138512e09c69SYuval Mintz 		rc = qed_get_dev_info(p_hwfn->cdev);
138612e09c69SYuval Mintz 		if (rc != 0)
138712e09c69SYuval Mintz 			goto err1;
138812e09c69SYuval Mintz 	}
138912e09c69SYuval Mintz 
139012e09c69SYuval Mintz 	qed_hw_hwfn_prepare(p_hwfn);
1391fe56b9e6SYuval Mintz 
1392fe56b9e6SYuval Mintz 	/* Initialize MCP structure */
1393fe56b9e6SYuval Mintz 	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1394fe56b9e6SYuval Mintz 	if (rc) {
1395fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1396fe56b9e6SYuval Mintz 		goto err1;
1397fe56b9e6SYuval Mintz 	}
1398fe56b9e6SYuval Mintz 
1399fe56b9e6SYuval Mintz 	/* Read the device configuration information from the HW and SHMEM */
1400fe56b9e6SYuval Mintz 	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1401fe56b9e6SYuval Mintz 	if (rc) {
1402fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1403fe56b9e6SYuval Mintz 		goto err2;
1404fe56b9e6SYuval Mintz 	}
1405fe56b9e6SYuval Mintz 
1406fe56b9e6SYuval Mintz 	/* Allocate the init RT array and initialize the init-ops engine */
1407fe56b9e6SYuval Mintz 	rc = qed_init_alloc(p_hwfn);
1408fe56b9e6SYuval Mintz 	if (rc) {
1409fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1410fe56b9e6SYuval Mintz 		goto err2;
1411fe56b9e6SYuval Mintz 	}
1412fe56b9e6SYuval Mintz 
1413fe56b9e6SYuval Mintz 	return rc;
1414fe56b9e6SYuval Mintz err2:
141532a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn))
141632a47e72SYuval Mintz 		qed_iov_free_hw_info(p_hwfn->cdev);
1417fe56b9e6SYuval Mintz 	qed_mcp_free(p_hwfn);
1418fe56b9e6SYuval Mintz err1:
1419fe56b9e6SYuval Mintz 	qed_hw_hwfn_free(p_hwfn);
1420fe56b9e6SYuval Mintz err0:
1421fe56b9e6SYuval Mintz 	return rc;
1422fe56b9e6SYuval Mintz }
1423fe56b9e6SYuval Mintz 
1424fe56b9e6SYuval Mintz int qed_hw_prepare(struct qed_dev *cdev,
1425fe56b9e6SYuval Mintz 		   int personality)
1426fe56b9e6SYuval Mintz {
1427c78df14eSAriel Elior 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1428c78df14eSAriel Elior 	int rc;
1429fe56b9e6SYuval Mintz 
1430fe56b9e6SYuval Mintz 	/* Store the precompiled init data ptrs */
1431fe56b9e6SYuval Mintz 	qed_init_iro_array(cdev);
1432fe56b9e6SYuval Mintz 
1433fe56b9e6SYuval Mintz 	/* Initialize the first hwfn - will learn number of hwfns */
1434c78df14eSAriel Elior 	rc = qed_hw_prepare_single(p_hwfn,
1435c78df14eSAriel Elior 				   cdev->regview,
1436fe56b9e6SYuval Mintz 				   cdev->doorbells, personality);
1437fe56b9e6SYuval Mintz 	if (rc)
1438fe56b9e6SYuval Mintz 		return rc;
1439fe56b9e6SYuval Mintz 
1440c78df14eSAriel Elior 	personality = p_hwfn->hw_info.personality;
1441fe56b9e6SYuval Mintz 
1442fe56b9e6SYuval Mintz 	/* Initialize the rest of the hwfns */
1443c78df14eSAriel Elior 	if (cdev->num_hwfns > 1) {
1444fe56b9e6SYuval Mintz 		void __iomem *p_regview, *p_doorbell;
1445c78df14eSAriel Elior 		u8 __iomem *addr;
1446fe56b9e6SYuval Mintz 
1447c78df14eSAriel Elior 		/* adjust bar offset for second engine */
1448c2035eeaSRam Amrani 		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
1449c78df14eSAriel Elior 		p_regview = addr;
1450c78df14eSAriel Elior 
1451c78df14eSAriel Elior 		/* adjust doorbell bar offset for second engine */
1452c2035eeaSRam Amrani 		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
1453c78df14eSAriel Elior 		p_doorbell = addr;
1454c78df14eSAriel Elior 
1455c78df14eSAriel Elior 		/* prepare second hw function */
1456c78df14eSAriel Elior 		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1457fe56b9e6SYuval Mintz 					   p_doorbell, personality);
1458c78df14eSAriel Elior 
1459c78df14eSAriel Elior 		/* in case of error, need to free the previously
1460c78df14eSAriel Elior 		 * initiliazed hwfn 0.
1461c78df14eSAriel Elior 		 */
1462fe56b9e6SYuval Mintz 		if (rc) {
1463c78df14eSAriel Elior 			qed_init_free(p_hwfn);
1464c78df14eSAriel Elior 			qed_mcp_free(p_hwfn);
1465c78df14eSAriel Elior 			qed_hw_hwfn_free(p_hwfn);
1466fe56b9e6SYuval Mintz 		}
1467fe56b9e6SYuval Mintz 	}
1468fe56b9e6SYuval Mintz 
1469c78df14eSAriel Elior 	return rc;
1470fe56b9e6SYuval Mintz }
1471fe56b9e6SYuval Mintz 
1472fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev)
1473fe56b9e6SYuval Mintz {
1474fe56b9e6SYuval Mintz 	int i;
1475fe56b9e6SYuval Mintz 
1476fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1477fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1478fe56b9e6SYuval Mintz 
1479fe56b9e6SYuval Mintz 		qed_init_free(p_hwfn);
1480fe56b9e6SYuval Mintz 		qed_hw_hwfn_free(p_hwfn);
1481fe56b9e6SYuval Mintz 		qed_mcp_free(p_hwfn);
1482fe56b9e6SYuval Mintz 	}
148332a47e72SYuval Mintz 
148432a47e72SYuval Mintz 	qed_iov_free_hw_info(cdev);
1485fe56b9e6SYuval Mintz }
1486fe56b9e6SYuval Mintz 
1487fe56b9e6SYuval Mintz int qed_chain_alloc(struct qed_dev *cdev,
1488fe56b9e6SYuval Mintz 		    enum qed_chain_use_mode intended_use,
1489fe56b9e6SYuval Mintz 		    enum qed_chain_mode mode,
1490fe56b9e6SYuval Mintz 		    u16 num_elems,
1491fe56b9e6SYuval Mintz 		    size_t elem_size,
1492fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1493fe56b9e6SYuval Mintz {
1494fe56b9e6SYuval Mintz 	dma_addr_t p_pbl_phys = 0;
1495fe56b9e6SYuval Mintz 	void *p_pbl_virt = NULL;
1496fe56b9e6SYuval Mintz 	dma_addr_t p_phys = 0;
1497fe56b9e6SYuval Mintz 	void *p_virt = NULL;
1498fe56b9e6SYuval Mintz 	u16 page_cnt = 0;
1499fe56b9e6SYuval Mintz 	size_t size;
1500fe56b9e6SYuval Mintz 
1501fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_SINGLE)
1502fe56b9e6SYuval Mintz 		page_cnt = 1;
1503fe56b9e6SYuval Mintz 	else
1504fe56b9e6SYuval Mintz 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1505fe56b9e6SYuval Mintz 
1506fe56b9e6SYuval Mintz 	size = page_cnt * QED_CHAIN_PAGE_SIZE;
1507fe56b9e6SYuval Mintz 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1508fe56b9e6SYuval Mintz 				    size, &p_phys, GFP_KERNEL);
1509fe56b9e6SYuval Mintz 	if (!p_virt) {
1510fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1511fe56b9e6SYuval Mintz 		goto nomem;
1512fe56b9e6SYuval Mintz 	}
1513fe56b9e6SYuval Mintz 
1514fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_PBL) {
1515fe56b9e6SYuval Mintz 		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1516fe56b9e6SYuval Mintz 		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1517fe56b9e6SYuval Mintz 						size, &p_pbl_phys,
1518fe56b9e6SYuval Mintz 						GFP_KERNEL);
1519fe56b9e6SYuval Mintz 		if (!p_pbl_virt) {
1520fe56b9e6SYuval Mintz 			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1521fe56b9e6SYuval Mintz 			goto nomem;
1522fe56b9e6SYuval Mintz 		}
1523fe56b9e6SYuval Mintz 
1524fe56b9e6SYuval Mintz 		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1525fe56b9e6SYuval Mintz 				   (u8)elem_size, intended_use,
1526fe56b9e6SYuval Mintz 				   p_pbl_phys, p_pbl_virt);
1527fe56b9e6SYuval Mintz 	} else {
1528fe56b9e6SYuval Mintz 		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1529fe56b9e6SYuval Mintz 			       (u8)elem_size, intended_use, mode);
1530fe56b9e6SYuval Mintz 	}
1531fe56b9e6SYuval Mintz 
1532fe56b9e6SYuval Mintz 	return 0;
1533fe56b9e6SYuval Mintz 
1534fe56b9e6SYuval Mintz nomem:
1535fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
1536fe56b9e6SYuval Mintz 			  page_cnt * QED_CHAIN_PAGE_SIZE,
1537fe56b9e6SYuval Mintz 			  p_virt, p_phys);
1538fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
1539fe56b9e6SYuval Mintz 			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1540fe56b9e6SYuval Mintz 			  p_pbl_virt, p_pbl_phys);
1541fe56b9e6SYuval Mintz 
1542fe56b9e6SYuval Mintz 	return -ENOMEM;
1543fe56b9e6SYuval Mintz }
1544fe56b9e6SYuval Mintz 
1545fe56b9e6SYuval Mintz void qed_chain_free(struct qed_dev *cdev,
1546fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1547fe56b9e6SYuval Mintz {
1548fe56b9e6SYuval Mintz 	size_t size;
1549fe56b9e6SYuval Mintz 
1550fe56b9e6SYuval Mintz 	if (!p_chain->p_virt_addr)
1551fe56b9e6SYuval Mintz 		return;
1552fe56b9e6SYuval Mintz 
1553fe56b9e6SYuval Mintz 	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1554fe56b9e6SYuval Mintz 		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1555fe56b9e6SYuval Mintz 		dma_free_coherent(&cdev->pdev->dev, size,
1556fe56b9e6SYuval Mintz 				  p_chain->pbl.p_virt_table,
1557fe56b9e6SYuval Mintz 				  p_chain->pbl.p_phys_table);
1558fe56b9e6SYuval Mintz 	}
1559fe56b9e6SYuval Mintz 
1560fe56b9e6SYuval Mintz 	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1561fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev, size,
1562fe56b9e6SYuval Mintz 			  p_chain->p_virt_addr,
1563fe56b9e6SYuval Mintz 			  p_chain->p_phys_addr);
1564fe56b9e6SYuval Mintz }
1565cee4d264SManish Chopra 
1566cee4d264SManish Chopra int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1567cee4d264SManish Chopra 		    u16 src_id, u16 *dst_id)
1568cee4d264SManish Chopra {
1569cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1570cee4d264SManish Chopra 		u16 min, max;
1571cee4d264SManish Chopra 
1572cee4d264SManish Chopra 		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1573cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1574cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1575cee4d264SManish Chopra 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1576cee4d264SManish Chopra 			  src_id, min, max);
1577cee4d264SManish Chopra 
1578cee4d264SManish Chopra 		return -EINVAL;
1579cee4d264SManish Chopra 	}
1580cee4d264SManish Chopra 
1581cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1582cee4d264SManish Chopra 
1583cee4d264SManish Chopra 	return 0;
1584cee4d264SManish Chopra }
1585cee4d264SManish Chopra 
1586cee4d264SManish Chopra int qed_fw_vport(struct qed_hwfn *p_hwfn,
1587cee4d264SManish Chopra 		 u8 src_id, u8 *dst_id)
1588cee4d264SManish Chopra {
1589cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1590cee4d264SManish Chopra 		u8 min, max;
1591cee4d264SManish Chopra 
1592cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
1593cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
1594cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1595cee4d264SManish Chopra 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
1596cee4d264SManish Chopra 			  src_id, min, max);
1597cee4d264SManish Chopra 
1598cee4d264SManish Chopra 		return -EINVAL;
1599cee4d264SManish Chopra 	}
1600cee4d264SManish Chopra 
1601cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1602cee4d264SManish Chopra 
1603cee4d264SManish Chopra 	return 0;
1604cee4d264SManish Chopra }
1605cee4d264SManish Chopra 
1606cee4d264SManish Chopra int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1607cee4d264SManish Chopra 		   u8 src_id, u8 *dst_id)
1608cee4d264SManish Chopra {
1609cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1610cee4d264SManish Chopra 		u8 min, max;
1611cee4d264SManish Chopra 
1612cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1613cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1614cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1615cee4d264SManish Chopra 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1616cee4d264SManish Chopra 			  src_id, min, max);
1617cee4d264SManish Chopra 
1618cee4d264SManish Chopra 		return -EINVAL;
1619cee4d264SManish Chopra 	}
1620cee4d264SManish Chopra 
1621cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1622cee4d264SManish Chopra 
1623cee4d264SManish Chopra 	return 0;
1624cee4d264SManish Chopra }
1625bcd197c8SManish Chopra 
1626bcd197c8SManish Chopra /* Calculate final WFQ values for all vports and configure them.
1627bcd197c8SManish Chopra  * After this configuration each vport will have
1628bcd197c8SManish Chopra  * approx min rate =  min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
1629bcd197c8SManish Chopra  */
1630bcd197c8SManish Chopra static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1631bcd197c8SManish Chopra 					     struct qed_ptt *p_ptt,
1632bcd197c8SManish Chopra 					     u32 min_pf_rate)
1633bcd197c8SManish Chopra {
1634bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
1635bcd197c8SManish Chopra 	int i;
1636bcd197c8SManish Chopra 
1637bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
1638bcd197c8SManish Chopra 
1639bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1640bcd197c8SManish Chopra 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1641bcd197c8SManish Chopra 
1642bcd197c8SManish Chopra 		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
1643bcd197c8SManish Chopra 						min_pf_rate;
1644bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
1645bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
1646bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
1647bcd197c8SManish Chopra 	}
1648bcd197c8SManish Chopra }
1649bcd197c8SManish Chopra 
1650bcd197c8SManish Chopra static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
1651bcd197c8SManish Chopra 				       u32 min_pf_rate)
1652bcd197c8SManish Chopra 
1653bcd197c8SManish Chopra {
1654bcd197c8SManish Chopra 	int i;
1655bcd197c8SManish Chopra 
1656bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
1657bcd197c8SManish Chopra 		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
1658bcd197c8SManish Chopra }
1659bcd197c8SManish Chopra 
1660bcd197c8SManish Chopra static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1661bcd197c8SManish Chopra 					   struct qed_ptt *p_ptt,
1662bcd197c8SManish Chopra 					   u32 min_pf_rate)
1663bcd197c8SManish Chopra {
1664bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
1665bcd197c8SManish Chopra 	int i;
1666bcd197c8SManish Chopra 
1667bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
1668bcd197c8SManish Chopra 
1669bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1670bcd197c8SManish Chopra 		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
1671bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
1672bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
1673bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
1674bcd197c8SManish Chopra 	}
1675bcd197c8SManish Chopra }
1676bcd197c8SManish Chopra 
1677bcd197c8SManish Chopra /* This function performs several validations for WFQ
1678bcd197c8SManish Chopra  * configuration and required min rate for a given vport
1679bcd197c8SManish Chopra  * 1. req_rate must be greater than one percent of min_pf_rate.
1680bcd197c8SManish Chopra  * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
1681bcd197c8SManish Chopra  *    rates to get less than one percent of min_pf_rate.
1682bcd197c8SManish Chopra  * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
1683bcd197c8SManish Chopra  */
1684bcd197c8SManish Chopra static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
1685bcd197c8SManish Chopra 			      u16 vport_id, u32 req_rate,
1686bcd197c8SManish Chopra 			      u32 min_pf_rate)
1687bcd197c8SManish Chopra {
1688bcd197c8SManish Chopra 	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
1689bcd197c8SManish Chopra 	int non_requested_count = 0, req_count = 0, i, num_vports;
1690bcd197c8SManish Chopra 
1691bcd197c8SManish Chopra 	num_vports = p_hwfn->qm_info.num_vports;
1692bcd197c8SManish Chopra 
1693bcd197c8SManish Chopra 	/* Accounting for the vports which are configured for WFQ explicitly */
1694bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
1695bcd197c8SManish Chopra 		u32 tmp_speed;
1696bcd197c8SManish Chopra 
1697bcd197c8SManish Chopra 		if ((i != vport_id) &&
1698bcd197c8SManish Chopra 		    p_hwfn->qm_info.wfq_data[i].configured) {
1699bcd197c8SManish Chopra 			req_count++;
1700bcd197c8SManish Chopra 			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1701bcd197c8SManish Chopra 			total_req_min_rate += tmp_speed;
1702bcd197c8SManish Chopra 		}
1703bcd197c8SManish Chopra 	}
1704bcd197c8SManish Chopra 
1705bcd197c8SManish Chopra 	/* Include current vport data as well */
1706bcd197c8SManish Chopra 	req_count++;
1707bcd197c8SManish Chopra 	total_req_min_rate += req_rate;
1708bcd197c8SManish Chopra 	non_requested_count = num_vports - req_count;
1709bcd197c8SManish Chopra 
1710bcd197c8SManish Chopra 	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
1711bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1712bcd197c8SManish Chopra 			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1713bcd197c8SManish Chopra 			   vport_id, req_rate, min_pf_rate);
1714bcd197c8SManish Chopra 		return -EINVAL;
1715bcd197c8SManish Chopra 	}
1716bcd197c8SManish Chopra 
1717bcd197c8SManish Chopra 	if (num_vports > QED_WFQ_UNIT) {
1718bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1719bcd197c8SManish Chopra 			   "Number of vports is greater than %d\n",
1720bcd197c8SManish Chopra 			   QED_WFQ_UNIT);
1721bcd197c8SManish Chopra 		return -EINVAL;
1722bcd197c8SManish Chopra 	}
1723bcd197c8SManish Chopra 
1724bcd197c8SManish Chopra 	if (total_req_min_rate > min_pf_rate) {
1725bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1726bcd197c8SManish Chopra 			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
1727bcd197c8SManish Chopra 			   total_req_min_rate, min_pf_rate);
1728bcd197c8SManish Chopra 		return -EINVAL;
1729bcd197c8SManish Chopra 	}
1730bcd197c8SManish Chopra 
1731bcd197c8SManish Chopra 	total_left_rate	= min_pf_rate - total_req_min_rate;
1732bcd197c8SManish Chopra 
1733bcd197c8SManish Chopra 	left_rate_per_vp = total_left_rate / non_requested_count;
1734bcd197c8SManish Chopra 	if (left_rate_per_vp <  min_pf_rate / QED_WFQ_UNIT) {
1735bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1736bcd197c8SManish Chopra 			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1737bcd197c8SManish Chopra 			   left_rate_per_vp, min_pf_rate);
1738bcd197c8SManish Chopra 		return -EINVAL;
1739bcd197c8SManish Chopra 	}
1740bcd197c8SManish Chopra 
1741bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
1742bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
1743bcd197c8SManish Chopra 
1744bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
1745bcd197c8SManish Chopra 		if (p_hwfn->qm_info.wfq_data[i].configured)
1746bcd197c8SManish Chopra 			continue;
1747bcd197c8SManish Chopra 
1748bcd197c8SManish Chopra 		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
1749bcd197c8SManish Chopra 	}
1750bcd197c8SManish Chopra 
1751bcd197c8SManish Chopra 	return 0;
1752bcd197c8SManish Chopra }
1753bcd197c8SManish Chopra 
1754bcd197c8SManish Chopra static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
1755bcd197c8SManish Chopra 						 struct qed_ptt *p_ptt,
1756bcd197c8SManish Chopra 						 u32 min_pf_rate)
1757bcd197c8SManish Chopra {
1758bcd197c8SManish Chopra 	bool use_wfq = false;
1759bcd197c8SManish Chopra 	int rc = 0;
1760bcd197c8SManish Chopra 	u16 i;
1761bcd197c8SManish Chopra 
1762bcd197c8SManish Chopra 	/* Validate all pre configured vports for wfq */
1763bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1764bcd197c8SManish Chopra 		u32 rate;
1765bcd197c8SManish Chopra 
1766bcd197c8SManish Chopra 		if (!p_hwfn->qm_info.wfq_data[i].configured)
1767bcd197c8SManish Chopra 			continue;
1768bcd197c8SManish Chopra 
1769bcd197c8SManish Chopra 		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
1770bcd197c8SManish Chopra 		use_wfq = true;
1771bcd197c8SManish Chopra 
1772bcd197c8SManish Chopra 		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
1773bcd197c8SManish Chopra 		if (rc) {
1774bcd197c8SManish Chopra 			DP_NOTICE(p_hwfn,
1775bcd197c8SManish Chopra 				  "WFQ validation failed while configuring min rate\n");
1776bcd197c8SManish Chopra 			break;
1777bcd197c8SManish Chopra 		}
1778bcd197c8SManish Chopra 	}
1779bcd197c8SManish Chopra 
1780bcd197c8SManish Chopra 	if (!rc && use_wfq)
1781bcd197c8SManish Chopra 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1782bcd197c8SManish Chopra 	else
1783bcd197c8SManish Chopra 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1784bcd197c8SManish Chopra 
1785bcd197c8SManish Chopra 	return rc;
1786bcd197c8SManish Chopra }
1787bcd197c8SManish Chopra 
1788bcd197c8SManish Chopra /* API to configure WFQ from mcp link change */
1789bcd197c8SManish Chopra void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
1790bcd197c8SManish Chopra {
1791bcd197c8SManish Chopra 	int i;
1792bcd197c8SManish Chopra 
1793bcd197c8SManish Chopra 	for_each_hwfn(cdev, i) {
1794bcd197c8SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1795bcd197c8SManish Chopra 
1796bcd197c8SManish Chopra 		__qed_configure_vp_wfq_on_link_change(p_hwfn,
1797bcd197c8SManish Chopra 						      p_hwfn->p_dpc_ptt,
1798bcd197c8SManish Chopra 						      min_pf_rate);
1799bcd197c8SManish Chopra 	}
1800bcd197c8SManish Chopra }
18014b01e519SManish Chopra 
18024b01e519SManish Chopra int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
18034b01e519SManish Chopra 				     struct qed_ptt *p_ptt,
18044b01e519SManish Chopra 				     struct qed_mcp_link_state *p_link,
18054b01e519SManish Chopra 				     u8 max_bw)
18064b01e519SManish Chopra {
18074b01e519SManish Chopra 	int rc = 0;
18084b01e519SManish Chopra 
18094b01e519SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
18104b01e519SManish Chopra 
18114b01e519SManish Chopra 	if (!p_link->line_speed && (max_bw != 100))
18124b01e519SManish Chopra 		return rc;
18134b01e519SManish Chopra 
18144b01e519SManish Chopra 	p_link->speed = (p_link->line_speed * max_bw) / 100;
18154b01e519SManish Chopra 	p_hwfn->qm_info.pf_rl = p_link->speed;
18164b01e519SManish Chopra 
18174b01e519SManish Chopra 	/* Since the limiter also affects Tx-switched traffic, we don't want it
18184b01e519SManish Chopra 	 * to limit such traffic in case there's no actual limit.
18194b01e519SManish Chopra 	 * In that case, set limit to imaginary high boundary.
18204b01e519SManish Chopra 	 */
18214b01e519SManish Chopra 	if (max_bw == 100)
18224b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
18234b01e519SManish Chopra 
18244b01e519SManish Chopra 	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
18254b01e519SManish Chopra 			    p_hwfn->qm_info.pf_rl);
18264b01e519SManish Chopra 
18274b01e519SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
18284b01e519SManish Chopra 		   "Configured MAX bandwidth to be %08x Mb/sec\n",
18294b01e519SManish Chopra 		   p_link->speed);
18304b01e519SManish Chopra 
18314b01e519SManish Chopra 	return rc;
18324b01e519SManish Chopra }
18334b01e519SManish Chopra 
18344b01e519SManish Chopra /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
18354b01e519SManish Chopra int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
18364b01e519SManish Chopra {
18374b01e519SManish Chopra 	int i, rc = -EINVAL;
18384b01e519SManish Chopra 
18394b01e519SManish Chopra 	if (max_bw < 1 || max_bw > 100) {
18404b01e519SManish Chopra 		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
18414b01e519SManish Chopra 		return rc;
18424b01e519SManish Chopra 	}
18434b01e519SManish Chopra 
18444b01e519SManish Chopra 	for_each_hwfn(cdev, i) {
18454b01e519SManish Chopra 		struct qed_hwfn	*p_hwfn = &cdev->hwfns[i];
18464b01e519SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
18474b01e519SManish Chopra 		struct qed_mcp_link_state *p_link;
18484b01e519SManish Chopra 		struct qed_ptt *p_ptt;
18494b01e519SManish Chopra 
18504b01e519SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
18514b01e519SManish Chopra 
18524b01e519SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
18534b01e519SManish Chopra 		if (!p_ptt)
18544b01e519SManish Chopra 			return -EBUSY;
18554b01e519SManish Chopra 
18564b01e519SManish Chopra 		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
18574b01e519SManish Chopra 						      p_link, max_bw);
18584b01e519SManish Chopra 
18594b01e519SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
18604b01e519SManish Chopra 
18614b01e519SManish Chopra 		if (rc)
18624b01e519SManish Chopra 			break;
18634b01e519SManish Chopra 	}
18644b01e519SManish Chopra 
18654b01e519SManish Chopra 	return rc;
18664b01e519SManish Chopra }
1867a64b02d5SManish Chopra 
1868a64b02d5SManish Chopra int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
1869a64b02d5SManish Chopra 				     struct qed_ptt *p_ptt,
1870a64b02d5SManish Chopra 				     struct qed_mcp_link_state *p_link,
1871a64b02d5SManish Chopra 				     u8 min_bw)
1872a64b02d5SManish Chopra {
1873a64b02d5SManish Chopra 	int rc = 0;
1874a64b02d5SManish Chopra 
1875a64b02d5SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
1876a64b02d5SManish Chopra 	p_hwfn->qm_info.pf_wfq = min_bw;
1877a64b02d5SManish Chopra 
1878a64b02d5SManish Chopra 	if (!p_link->line_speed)
1879a64b02d5SManish Chopra 		return rc;
1880a64b02d5SManish Chopra 
1881a64b02d5SManish Chopra 	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
1882a64b02d5SManish Chopra 
1883a64b02d5SManish Chopra 	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
1884a64b02d5SManish Chopra 
1885a64b02d5SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1886a64b02d5SManish Chopra 		   "Configured MIN bandwidth to be %d Mb/sec\n",
1887a64b02d5SManish Chopra 		   p_link->min_pf_rate);
1888a64b02d5SManish Chopra 
1889a64b02d5SManish Chopra 	return rc;
1890a64b02d5SManish Chopra }
1891a64b02d5SManish Chopra 
1892a64b02d5SManish Chopra /* Main API to configure PF min bandwidth where bw range is [1-100] */
1893a64b02d5SManish Chopra int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
1894a64b02d5SManish Chopra {
1895a64b02d5SManish Chopra 	int i, rc = -EINVAL;
1896a64b02d5SManish Chopra 
1897a64b02d5SManish Chopra 	if (min_bw < 1 || min_bw > 100) {
1898a64b02d5SManish Chopra 		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
1899a64b02d5SManish Chopra 		return rc;
1900a64b02d5SManish Chopra 	}
1901a64b02d5SManish Chopra 
1902a64b02d5SManish Chopra 	for_each_hwfn(cdev, i) {
1903a64b02d5SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1904a64b02d5SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
1905a64b02d5SManish Chopra 		struct qed_mcp_link_state *p_link;
1906a64b02d5SManish Chopra 		struct qed_ptt *p_ptt;
1907a64b02d5SManish Chopra 
1908a64b02d5SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
1909a64b02d5SManish Chopra 
1910a64b02d5SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
1911a64b02d5SManish Chopra 		if (!p_ptt)
1912a64b02d5SManish Chopra 			return -EBUSY;
1913a64b02d5SManish Chopra 
1914a64b02d5SManish Chopra 		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
1915a64b02d5SManish Chopra 						      p_link, min_bw);
1916a64b02d5SManish Chopra 		if (rc) {
1917a64b02d5SManish Chopra 			qed_ptt_release(p_hwfn, p_ptt);
1918a64b02d5SManish Chopra 			return rc;
1919a64b02d5SManish Chopra 		}
1920a64b02d5SManish Chopra 
1921a64b02d5SManish Chopra 		if (p_link->min_pf_rate) {
1922a64b02d5SManish Chopra 			u32 min_rate = p_link->min_pf_rate;
1923a64b02d5SManish Chopra 
1924a64b02d5SManish Chopra 			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
1925a64b02d5SManish Chopra 								   p_ptt,
1926a64b02d5SManish Chopra 								   min_rate);
1927a64b02d5SManish Chopra 		}
1928a64b02d5SManish Chopra 
1929a64b02d5SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
1930a64b02d5SManish Chopra 	}
1931a64b02d5SManish Chopra 
1932a64b02d5SManish Chopra 	return rc;
1933a64b02d5SManish Chopra }
1934