1fe56b9e6SYuval Mintz /* QLogic qed NIC Driver
2fe56b9e6SYuval Mintz  * Copyright (c) 2015 QLogic Corporation
3fe56b9e6SYuval Mintz  *
4fe56b9e6SYuval Mintz  * This software is available under the terms of the GNU General Public License
5fe56b9e6SYuval Mintz  * (GPL) Version 2, available from the file COPYING in the main directory of
6fe56b9e6SYuval Mintz  * this source tree.
7fe56b9e6SYuval Mintz  */
8fe56b9e6SYuval Mintz 
9fe56b9e6SYuval Mintz #include <linux/types.h>
10fe56b9e6SYuval Mintz #include <asm/byteorder.h>
11fe56b9e6SYuval Mintz #include <linux/io.h>
12fe56b9e6SYuval Mintz #include <linux/delay.h>
13fe56b9e6SYuval Mintz #include <linux/dma-mapping.h>
14fe56b9e6SYuval Mintz #include <linux/errno.h>
15fe56b9e6SYuval Mintz #include <linux/kernel.h>
16fe56b9e6SYuval Mintz #include <linux/mutex.h>
17fe56b9e6SYuval Mintz #include <linux/pci.h>
18fe56b9e6SYuval Mintz #include <linux/slab.h>
19fe56b9e6SYuval Mintz #include <linux/string.h>
20fe56b9e6SYuval Mintz #include <linux/etherdevice.h>
21fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h>
22fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h>
23fe56b9e6SYuval Mintz #include "qed.h"
24fe56b9e6SYuval Mintz #include "qed_cxt.h"
25fe56b9e6SYuval Mintz #include "qed_dev_api.h"
26fe56b9e6SYuval Mintz #include "qed_hsi.h"
27fe56b9e6SYuval Mintz #include "qed_hw.h"
28fe56b9e6SYuval Mintz #include "qed_init_ops.h"
29fe56b9e6SYuval Mintz #include "qed_int.h"
30fe56b9e6SYuval Mintz #include "qed_mcp.h"
31fe56b9e6SYuval Mintz #include "qed_reg_addr.h"
32fe56b9e6SYuval Mintz #include "qed_sp.h"
3332a47e72SYuval Mintz #include "qed_sriov.h"
340b55e27dSYuval Mintz #include "qed_vf.h"
35fe56b9e6SYuval Mintz 
36fe56b9e6SYuval Mintz /* API common to all protocols */
37c2035eeaSRam Amrani enum BAR_ID {
38c2035eeaSRam Amrani 	BAR_ID_0,       /* used for GRC */
39c2035eeaSRam Amrani 	BAR_ID_1        /* Used for doorbells */
40c2035eeaSRam Amrani };
41c2035eeaSRam Amrani 
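/* Return the size (in bytes) of the requested PCI BAR for this hw-function.
 * VFs always see a fixed 128kB doorbell window. For PFs the size is read from
 * PGLUE_B, encoded as an exponent (size = 32kB << val); if an old MFW left the
 * register unconfigured, conservative defaults are assumed instead.
 */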
42c2035eeaSRam Amrani static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
43c2035eeaSRam Amrani 			   enum BAR_ID		bar_id)
44c2035eeaSRam Amrani {
45c2035eeaSRam Amrani 	u32 bar_reg = (bar_id == BAR_ID_0 ?
46c2035eeaSRam Amrani 		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
471408cc1fSYuval Mintz 	u32 val;
48c2035eeaSRam Amrani 
491408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
501408cc1fSYuval Mintz 		return 1 << 17;
511408cc1fSYuval Mintz 
521408cc1fSYuval Mintz 	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
53c2035eeaSRam Amrani 	if (val)
54c2035eeaSRam Amrani 		return 1 << (val + 15);
55c2035eeaSRam Amrani 
56c2035eeaSRam Amrani 	/* Old MFW initialized the above register only conditionally */
57c2035eeaSRam Amrani 	if (p_hwfn->cdev->num_hwfns > 1) {
58c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
59c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
60c2035eeaSRam Amrani 		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
61c2035eeaSRam Amrani 	} else {
62c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
63c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
64c2035eeaSRam Amrani 		return 512 * 1024;
65c2035eeaSRam Amrani 	}
66c2035eeaSRam Amrani }
67c2035eeaSRam Amrani 
68fe56b9e6SYuval Mintz void qed_init_dp(struct qed_dev *cdev,
69fe56b9e6SYuval Mintz 		 u32 dp_module, u8 dp_level)
70fe56b9e6SYuval Mintz {
71fe56b9e6SYuval Mintz 	u32 i;
72fe56b9e6SYuval Mintz 
73fe56b9e6SYuval Mintz 	cdev->dp_level = dp_level;
74fe56b9e6SYuval Mintz 	cdev->dp_module = dp_module;
75fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
76fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
77fe56b9e6SYuval Mintz 
78fe56b9e6SYuval Mintz 		p_hwfn->dp_level = dp_level;
79fe56b9e6SYuval Mintz 		p_hwfn->dp_module = dp_module;
80fe56b9e6SYuval Mintz 	}
81fe56b9e6SYuval Mintz }
82fe56b9e6SYuval Mintz 
83fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev)
84fe56b9e6SYuval Mintz {
85fe56b9e6SYuval Mintz 	u8 i;
86fe56b9e6SYuval Mintz 
87fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
88fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
89fe56b9e6SYuval Mintz 
90fe56b9e6SYuval Mintz 		p_hwfn->cdev = cdev;
91fe56b9e6SYuval Mintz 		p_hwfn->my_id = i;
92fe56b9e6SYuval Mintz 		p_hwfn->b_active = false;
93fe56b9e6SYuval Mintz 
94fe56b9e6SYuval Mintz 		mutex_init(&p_hwfn->dmae_info.mutex);
95fe56b9e6SYuval Mintz 	}
96fe56b9e6SYuval Mintz 
97fe56b9e6SYuval Mintz 	/* hwfn 0 is always active */
98fe56b9e6SYuval Mintz 	cdev->hwfns[0].b_active = true;
99fe56b9e6SYuval Mintz 
100fe56b9e6SYuval Mintz 	/* set the default cache alignment to 128 */
101fe56b9e6SYuval Mintz 	cdev->cache_shift = 7;
102fe56b9e6SYuval Mintz }
103fe56b9e6SYuval Mintz 
104fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
105fe56b9e6SYuval Mintz {
106fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
107fe56b9e6SYuval Mintz 
108fe56b9e6SYuval Mintz 	kfree(qm_info->qm_pq_params);
109fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = NULL;
110fe56b9e6SYuval Mintz 	kfree(qm_info->qm_vport_params);
111fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = NULL;
112fe56b9e6SYuval Mintz 	kfree(qm_info->qm_port_params);
113fe56b9e6SYuval Mintz 	qm_info->qm_port_params = NULL;
114bcd197c8SManish Chopra 	kfree(qm_info->wfq_data);
115bcd197c8SManish Chopra 	qm_info->wfq_data = NULL;
116fe56b9e6SYuval Mintz }
117fe56b9e6SYuval Mintz 
118fe56b9e6SYuval Mintz void qed_resc_free(struct qed_dev *cdev)
119fe56b9e6SYuval Mintz {
120fe56b9e6SYuval Mintz 	int i;
121fe56b9e6SYuval Mintz 
1221408cc1fSYuval Mintz 	if (IS_VF(cdev))
1231408cc1fSYuval Mintz 		return;
1241408cc1fSYuval Mintz 
125fe56b9e6SYuval Mintz 	kfree(cdev->fw_data);
126fe56b9e6SYuval Mintz 	cdev->fw_data = NULL;
127fe56b9e6SYuval Mintz 
128fe56b9e6SYuval Mintz 	kfree(cdev->reset_stats);
129fe56b9e6SYuval Mintz 
130fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
131fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
132fe56b9e6SYuval Mintz 
13325c089d7SYuval Mintz 		kfree(p_hwfn->p_tx_cids);
13425c089d7SYuval Mintz 		p_hwfn->p_tx_cids = NULL;
13525c089d7SYuval Mintz 		kfree(p_hwfn->p_rx_cids);
13625c089d7SYuval Mintz 		p_hwfn->p_rx_cids = NULL;
13725c089d7SYuval Mintz 	}
13825c089d7SYuval Mintz 
13925c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
14025c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
14125c089d7SYuval Mintz 
142fe56b9e6SYuval Mintz 		qed_cxt_mngr_free(p_hwfn);
143fe56b9e6SYuval Mintz 		qed_qm_info_free(p_hwfn);
144fe56b9e6SYuval Mintz 		qed_spq_free(p_hwfn);
145fe56b9e6SYuval Mintz 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
146fe56b9e6SYuval Mintz 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
147fe56b9e6SYuval Mintz 		qed_int_free(p_hwfn);
14832a47e72SYuval Mintz 		qed_iov_free(p_hwfn);
149fe56b9e6SYuval Mintz 		qed_dmae_info_free(p_hwfn);
150fe56b9e6SYuval Mintz 	}
151fe56b9e6SYuval Mintz }
152fe56b9e6SYuval Mintz 
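/* Derive the QM (queue-manager) configuration for this PF: one PQ per
 * non-offload TC, a single pure-LB PQ and one PQ per VF, mapped onto the
 * vports/PQs granted to this PF. On any allocation failure, whatever was
 * already allocated is released via qed_qm_info_free().
 */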
153fe56b9e6SYuval Mintz static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
154fe56b9e6SYuval Mintz {
1551408cc1fSYuval Mintz 	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
156fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
157fe56b9e6SYuval Mintz 	struct init_qm_port_params *p_qm_port;
158fe56b9e6SYuval Mintz 	u16 num_pqs, multi_cos_tcs = 1;
1591408cc1fSYuval Mintz 	u16 num_vfs = 0;
160fe56b9e6SYuval Mintz 
1611408cc1fSYuval Mintz #ifdef CONFIG_QED_SRIOV
1621408cc1fSYuval Mintz 	if (p_hwfn->cdev->p_iov_info)
1631408cc1fSYuval Mintz 		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
1641408cc1fSYuval Mintz #endif
165fe56b9e6SYuval Mintz 	memset(qm_info, 0, sizeof(*qm_info));
166fe56b9e6SYuval Mintz 
1671408cc1fSYuval Mintz 	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
168fe56b9e6SYuval Mintz 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
169fe56b9e6SYuval Mintz 
170fe56b9e6SYuval Mintz 	/* Sanity checking that setup requires legal number of resources */
171fe56b9e6SYuval Mintz 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
172fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
173fe56b9e6SYuval Mintz 		       "Need too many Physical queues - 0x%04x when only 0x%04x are available\n",
174fe56b9e6SYuval Mintz 		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
175fe56b9e6SYuval Mintz 		return -EINVAL;
176fe56b9e6SYuval Mintz 	}
177fe56b9e6SYuval Mintz 
178fe56b9e6SYuval Mintz 	/* PQs will be arranged as follows: First the per-TC PQs, then the pure-LB queue.
179fe56b9e6SYuval Mintz 	 */
180fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
18160fffb3bSYuval Mintz 					num_pqs, GFP_KERNEL);
182fe56b9e6SYuval Mintz 	if (!qm_info->qm_pq_params)
183fe56b9e6SYuval Mintz 		goto alloc_err;
184fe56b9e6SYuval Mintz 
185fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
18660fffb3bSYuval Mintz 					   num_vports, GFP_KERNEL);
187fe56b9e6SYuval Mintz 	if (!qm_info->qm_vport_params)
188fe56b9e6SYuval Mintz 		goto alloc_err;
189fe56b9e6SYuval Mintz 
190fe56b9e6SYuval Mintz 	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
19160fffb3bSYuval Mintz 					  MAX_NUM_PORTS, GFP_KERNEL);
192fe56b9e6SYuval Mintz 	if (!qm_info->qm_port_params)
193fe56b9e6SYuval Mintz 		goto alloc_err;
194fe56b9e6SYuval Mintz 
195bcd197c8SManish Chopra 	qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
196bcd197c8SManish Chopra 				    GFP_KERNEL);
197bcd197c8SManish Chopra 	if (!qm_info->wfq_data)
198bcd197c8SManish Chopra 		goto alloc_err;
199bcd197c8SManish Chopra 
200fe56b9e6SYuval Mintz 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
201fe56b9e6SYuval Mintz 
202fe56b9e6SYuval Mintz 	/* First init per-TC PQs */
2031408cc1fSYuval Mintz 	for (i = 0; i < multi_cos_tcs; i++, curr_queue++) {
2041408cc1fSYuval Mintz 		struct init_qm_pq_params *params =
2051408cc1fSYuval Mintz 		    &qm_info->qm_pq_params[curr_queue];
206fe56b9e6SYuval Mintz 
207fe56b9e6SYuval Mintz 		params->vport_id = vport_id;
208fe56b9e6SYuval Mintz 		params->tc_id = p_hwfn->hw_info.non_offload_tc;
209fe56b9e6SYuval Mintz 		params->wrr_group = 1;
210fe56b9e6SYuval Mintz 	}
211fe56b9e6SYuval Mintz 
212fe56b9e6SYuval Mintz 	/* Then init pure-LB PQ */
2131408cc1fSYuval Mintz 	qm_info->pure_lb_pq = curr_queue;
2141408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].vport_id =
2151408cc1fSYuval Mintz 	    (u8) RESC_START(p_hwfn, QED_VPORT);
2161408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
2171408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
2181408cc1fSYuval Mintz 	curr_queue++;
219fe56b9e6SYuval Mintz 
220fe56b9e6SYuval Mintz 	qm_info->offload_pq = 0;
2211408cc1fSYuval Mintz 	/* Then init per-VF PQs */
2221408cc1fSYuval Mintz 	vf_offset = curr_queue;
2231408cc1fSYuval Mintz 	for (i = 0; i < num_vfs; i++) {
2241408cc1fSYuval Mintz 		/* First vport is used by the PF */
2251408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
2261408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].tc_id =
2271408cc1fSYuval Mintz 		    p_hwfn->hw_info.non_offload_tc;
2281408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
2291408cc1fSYuval Mintz 		curr_queue++;
2301408cc1fSYuval Mintz 	}
2311408cc1fSYuval Mintz 
2321408cc1fSYuval Mintz 	qm_info->vf_queues_offset = vf_offset;
233fe56b9e6SYuval Mintz 	qm_info->num_pqs = num_pqs;
234fe56b9e6SYuval Mintz 	qm_info->num_vports = num_vports;
235fe56b9e6SYuval Mintz 
236fe56b9e6SYuval Mintz 	/* Initialize qm port parameters */
237fe56b9e6SYuval Mintz 	num_ports = p_hwfn->cdev->num_ports_in_engines;
238fe56b9e6SYuval Mintz 	for (i = 0; i < num_ports; i++) {
239fe56b9e6SYuval Mintz 		p_qm_port = &qm_info->qm_port_params[i];
240fe56b9e6SYuval Mintz 		p_qm_port->active = 1;
241fe56b9e6SYuval Mintz 		p_qm_port->num_active_phys_tcs = 4;
242fe56b9e6SYuval Mintz 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
243fe56b9e6SYuval Mintz 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
244fe56b9e6SYuval Mintz 	}
245fe56b9e6SYuval Mintz 
246fe56b9e6SYuval Mintz 	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
247fe56b9e6SYuval Mintz 
248fe56b9e6SYuval Mintz 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
249fe56b9e6SYuval Mintz 
2501408cc1fSYuval Mintz 	qm_info->num_vf_pqs = num_vfs;
251fe56b9e6SYuval Mintz 	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
252fe56b9e6SYuval Mintz 
253a64b02d5SManish Chopra 	for (i = 0; i < qm_info->num_vports; i++)
254a64b02d5SManish Chopra 		qm_info->qm_vport_params[i].vport_wfq = 1;
255a64b02d5SManish Chopra 
256fe56b9e6SYuval Mintz 	qm_info->pf_wfq = 0;
257fe56b9e6SYuval Mintz 	qm_info->pf_rl = 0;
258fe56b9e6SYuval Mintz 	qm_info->vport_rl_en = 1;
259a64b02d5SManish Chopra 	qm_info->vport_wfq_en = 1;
260fe56b9e6SYuval Mintz 
261fe56b9e6SYuval Mintz 	return 0;
262fe56b9e6SYuval Mintz 
263fe56b9e6SYuval Mintz alloc_err:
264fe56b9e6SYuval Mintz 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
265bcd197c8SManish Chopra 	qed_qm_info_free(p_hwfn);
266fe56b9e6SYuval Mintz 	return -ENOMEM;
267fe56b9e6SYuval Mintz }
268fe56b9e6SYuval Mintz 
269fe56b9e6SYuval Mintz int qed_resc_alloc(struct qed_dev *cdev)
270fe56b9e6SYuval Mintz {
271fe56b9e6SYuval Mintz 	struct qed_consq *p_consq;
272fe56b9e6SYuval Mintz 	struct qed_eq *p_eq;
273fe56b9e6SYuval Mintz 	int i, rc = 0;
274fe56b9e6SYuval Mintz 
2751408cc1fSYuval Mintz 	if (IS_VF(cdev))
2761408cc1fSYuval Mintz 		return rc;
2771408cc1fSYuval Mintz 
278fe56b9e6SYuval Mintz 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
279fe56b9e6SYuval Mintz 	if (!cdev->fw_data)
280fe56b9e6SYuval Mintz 		return -ENOMEM;
281fe56b9e6SYuval Mintz 
28225c089d7SYuval Mintz 	/* Allocate Memory for the Queue->CID mapping */
28325c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
28425c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
28525c089d7SYuval Mintz 		int tx_size = sizeof(struct qed_hw_cid_data) *
28625c089d7SYuval Mintz 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
28725c089d7SYuval Mintz 		int rx_size = sizeof(struct qed_hw_cid_data) *
28825c089d7SYuval Mintz 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
28925c089d7SYuval Mintz 
29025c089d7SYuval Mintz 		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
29125c089d7SYuval Mintz 		if (!p_hwfn->p_tx_cids) {
29225c089d7SYuval Mintz 			DP_NOTICE(p_hwfn,
29325c089d7SYuval Mintz 				  "Failed to allocate memory for Tx Cids\n");
2949b15acbfSDan Carpenter 			rc = -ENOMEM;
29525c089d7SYuval Mintz 			goto alloc_err;
29625c089d7SYuval Mintz 		}
29725c089d7SYuval Mintz 
29825c089d7SYuval Mintz 		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
29925c089d7SYuval Mintz 		if (!p_hwfn->p_rx_cids) {
30025c089d7SYuval Mintz 			DP_NOTICE(p_hwfn,
30125c089d7SYuval Mintz 				  "Failed to allocate memory for Rx Cids\n");
3029b15acbfSDan Carpenter 			rc = -ENOMEM;
30325c089d7SYuval Mintz 			goto alloc_err;
30425c089d7SYuval Mintz 		}
30525c089d7SYuval Mintz 	}
30625c089d7SYuval Mintz 
307fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
308fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
309fe56b9e6SYuval Mintz 
310fe56b9e6SYuval Mintz 		/* First allocate the context manager structure */
311fe56b9e6SYuval Mintz 		rc = qed_cxt_mngr_alloc(p_hwfn);
312fe56b9e6SYuval Mintz 		if (rc)
313fe56b9e6SYuval Mintz 			goto alloc_err;
314fe56b9e6SYuval Mintz 
315fe56b9e6SYuval Mintz 		/* Set the HW cid/tid numbers (in the context manager)
316fe56b9e6SYuval Mintz 		 * Must be done prior to any further computations.
317fe56b9e6SYuval Mintz 		 */
318fe56b9e6SYuval Mintz 		rc = qed_cxt_set_pf_params(p_hwfn);
319fe56b9e6SYuval Mintz 		if (rc)
320fe56b9e6SYuval Mintz 			goto alloc_err;
321fe56b9e6SYuval Mintz 
322fe56b9e6SYuval Mintz 		/* Prepare and process QM requirements */
323fe56b9e6SYuval Mintz 		rc = qed_init_qm_info(p_hwfn);
324fe56b9e6SYuval Mintz 		if (rc)
325fe56b9e6SYuval Mintz 			goto alloc_err;
326fe56b9e6SYuval Mintz 
327fe56b9e6SYuval Mintz 		/* Compute the ILT client partition */
328fe56b9e6SYuval Mintz 		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
329fe56b9e6SYuval Mintz 		if (rc)
330fe56b9e6SYuval Mintz 			goto alloc_err;
331fe56b9e6SYuval Mintz 
332fe56b9e6SYuval Mintz 		/* CID map / ILT shadow table / T2
333fe56b9e6SYuval Mintz 		 * The table sizes are determined by the computations above
334fe56b9e6SYuval Mintz 		 */
335fe56b9e6SYuval Mintz 		rc = qed_cxt_tables_alloc(p_hwfn);
336fe56b9e6SYuval Mintz 		if (rc)
337fe56b9e6SYuval Mintz 			goto alloc_err;
338fe56b9e6SYuval Mintz 
339fe56b9e6SYuval Mintz 		/* SPQ, must follow ILT because initializes SPQ context */
340fe56b9e6SYuval Mintz 		rc = qed_spq_alloc(p_hwfn);
341fe56b9e6SYuval Mintz 		if (rc)
342fe56b9e6SYuval Mintz 			goto alloc_err;
343fe56b9e6SYuval Mintz 
344fe56b9e6SYuval Mintz 		/* SP status block allocation */
345fe56b9e6SYuval Mintz 		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
346fe56b9e6SYuval Mintz 							 RESERVED_PTT_DPC);
347fe56b9e6SYuval Mintz 
348fe56b9e6SYuval Mintz 		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
349fe56b9e6SYuval Mintz 		if (rc)
350fe56b9e6SYuval Mintz 			goto alloc_err;
351fe56b9e6SYuval Mintz 
35232a47e72SYuval Mintz 		rc = qed_iov_alloc(p_hwfn);
35332a47e72SYuval Mintz 		if (rc)
35432a47e72SYuval Mintz 			goto alloc_err;
35532a47e72SYuval Mintz 
356fe56b9e6SYuval Mintz 		/* EQ */
357fe56b9e6SYuval Mintz 		p_eq = qed_eq_alloc(p_hwfn, 256);
3589b15acbfSDan Carpenter 		if (!p_eq) {
3599b15acbfSDan Carpenter 			rc = -ENOMEM;
360fe56b9e6SYuval Mintz 			goto alloc_err;
3619b15acbfSDan Carpenter 		}
362fe56b9e6SYuval Mintz 		p_hwfn->p_eq = p_eq;
363fe56b9e6SYuval Mintz 
364fe56b9e6SYuval Mintz 		p_consq = qed_consq_alloc(p_hwfn);
3659b15acbfSDan Carpenter 		if (!p_consq) {
3669b15acbfSDan Carpenter 			rc = -ENOMEM;
367fe56b9e6SYuval Mintz 			goto alloc_err;
3689b15acbfSDan Carpenter 		}
369fe56b9e6SYuval Mintz 		p_hwfn->p_consq = p_consq;
370fe56b9e6SYuval Mintz 
371fe56b9e6SYuval Mintz 		/* DMA info initialization */
372fe56b9e6SYuval Mintz 		rc = qed_dmae_info_alloc(p_hwfn);
373fe56b9e6SYuval Mintz 		if (rc) {
374fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
375fe56b9e6SYuval Mintz 				  "Failed to allocate memory for dmae_info structure\n");
376fe56b9e6SYuval Mintz 			goto alloc_err;
377fe56b9e6SYuval Mintz 		}
378fe56b9e6SYuval Mintz 	}
379fe56b9e6SYuval Mintz 
380fe56b9e6SYuval Mintz 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
381fe56b9e6SYuval Mintz 	if (!cdev->reset_stats) {
382fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
3839b15acbfSDan Carpenter 		rc = -ENOMEM;
384fe56b9e6SYuval Mintz 		goto alloc_err;
385fe56b9e6SYuval Mintz 	}
386fe56b9e6SYuval Mintz 
387fe56b9e6SYuval Mintz 	return 0;
388fe56b9e6SYuval Mintz 
389fe56b9e6SYuval Mintz alloc_err:
390fe56b9e6SYuval Mintz 	qed_resc_free(cdev);
391fe56b9e6SYuval Mintz 	return rc;
392fe56b9e6SYuval Mintz }
393fe56b9e6SYuval Mintz 
394fe56b9e6SYuval Mintz void qed_resc_setup(struct qed_dev *cdev)
395fe56b9e6SYuval Mintz {
396fe56b9e6SYuval Mintz 	int i;
397fe56b9e6SYuval Mintz 
3981408cc1fSYuval Mintz 	if (IS_VF(cdev))
3991408cc1fSYuval Mintz 		return;
4001408cc1fSYuval Mintz 
401fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
402fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
403fe56b9e6SYuval Mintz 
404fe56b9e6SYuval Mintz 		qed_cxt_mngr_setup(p_hwfn);
405fe56b9e6SYuval Mintz 		qed_spq_setup(p_hwfn);
406fe56b9e6SYuval Mintz 		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
407fe56b9e6SYuval Mintz 		qed_consq_setup(p_hwfn, p_hwfn->p_consq);
408fe56b9e6SYuval Mintz 
409fe56b9e6SYuval Mintz 		/* Read shadow of current MFW mailbox */
410fe56b9e6SYuval Mintz 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
411fe56b9e6SYuval Mintz 		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
412fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_cur,
413fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_length);
414fe56b9e6SYuval Mintz 
415fe56b9e6SYuval Mintz 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
41632a47e72SYuval Mintz 
41732a47e72SYuval Mintz 		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
418fe56b9e6SYuval Mintz 	}
419fe56b9e6SYuval Mintz }
420fe56b9e6SYuval Mintz 
421fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_CNT          (100)
422fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_TIME         (10)
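/* Request a FW "final cleanup" for the given PF/VF: post an aggregated-
 * interrupt command to XSDM and poll the USTORM ack location for up to
 * FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME ms. VF ids are offset
 * by 0x10 in the aggregated vector.
 */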
423fe56b9e6SYuval Mintz int qed_final_cleanup(struct qed_hwfn *p_hwfn,
4240b55e27dSYuval Mintz 		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
425fe56b9e6SYuval Mintz {
426fe56b9e6SYuval Mintz 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
427fe56b9e6SYuval Mintz 	int rc = -EBUSY;
428fe56b9e6SYuval Mintz 
429fc48b7a6SYuval Mintz 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
430fc48b7a6SYuval Mintz 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
431fe56b9e6SYuval Mintz 
4320b55e27dSYuval Mintz 	if (is_vf)
4330b55e27dSYuval Mintz 		id += 0x10;
4340b55e27dSYuval Mintz 
435fc48b7a6SYuval Mintz 	command |= X_FINAL_CLEANUP_AGG_INT <<
436fc48b7a6SYuval Mintz 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
437fc48b7a6SYuval Mintz 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
438fc48b7a6SYuval Mintz 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
439fc48b7a6SYuval Mintz 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
440fe56b9e6SYuval Mintz 
441fe56b9e6SYuval Mintz 	/* Make sure notification is not set before initiating final cleanup */
442fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr)) {
443fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
445fe56b9e6SYuval Mintz 			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
446fe56b9e6SYuval Mintz 		REG_WR(p_hwfn, addr, 0);
447fe56b9e6SYuval Mintz 	}
448fe56b9e6SYuval Mintz 
449fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
450fe56b9e6SYuval Mintz 		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
451fe56b9e6SYuval Mintz 		   id, command);
452fe56b9e6SYuval Mintz 
453fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
454fe56b9e6SYuval Mintz 
455fe56b9e6SYuval Mintz 	/* Poll until completion */
456fe56b9e6SYuval Mintz 	while (!REG_RD(p_hwfn, addr) && count--)
457fe56b9e6SYuval Mintz 		msleep(FINAL_CLEANUP_POLL_TIME);
458fe56b9e6SYuval Mintz 
459fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr))
460fe56b9e6SYuval Mintz 		rc = 0;
461fe56b9e6SYuval Mintz 	else
462fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
463fe56b9e6SYuval Mintz 			  "Failed to receive FW final cleanup notification\n");
464fe56b9e6SYuval Mintz 
465fe56b9e6SYuval Mintz 	/* Cleanup afterwards */
466fe56b9e6SYuval Mintz 	REG_WR(p_hwfn, addr, 0);
467fe56b9e6SYuval Mintz 
468fe56b9e6SYuval Mintz 	return rc;
469fe56b9e6SYuval Mintz }
470fe56b9e6SYuval Mintz 
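/* Build the MODE_* bitmask (chip revision, ports per engine, multi-function
 * mode) that is later handed to the init tool through qed_init_run().
 */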
471fe56b9e6SYuval Mintz static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
472fe56b9e6SYuval Mintz {
473fe56b9e6SYuval Mintz 	int hw_mode = 0;
474fe56b9e6SYuval Mintz 
47512e09c69SYuval Mintz 	hw_mode = (1 << MODE_BB_B0);
476fe56b9e6SYuval Mintz 
477fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->num_ports_in_engines) {
478fe56b9e6SYuval Mintz 	case 1:
479fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
480fe56b9e6SYuval Mintz 		break;
481fe56b9e6SYuval Mintz 	case 2:
482fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
483fe56b9e6SYuval Mintz 		break;
484fe56b9e6SYuval Mintz 	case 4:
485fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
486fe56b9e6SYuval Mintz 		break;
487fe56b9e6SYuval Mintz 	default:
488fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
489fe56b9e6SYuval Mintz 			  p_hwfn->cdev->num_ports_in_engines);
490fe56b9e6SYuval Mintz 		return;
491fe56b9e6SYuval Mintz 	}
492fe56b9e6SYuval Mintz 
493fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->mf_mode) {
494fc48b7a6SYuval Mintz 	case QED_MF_DEFAULT:
495fc48b7a6SYuval Mintz 	case QED_MF_NPAR:
496fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
497fe56b9e6SYuval Mintz 		break;
498fc48b7a6SYuval Mintz 	case QED_MF_OVLAN:
499fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SD;
500fc48b7a6SYuval Mintz 		break;
501fe56b9e6SYuval Mintz 	default:
502fc48b7a6SYuval Mintz 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
503fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
504fe56b9e6SYuval Mintz 	}
505fe56b9e6SYuval Mintz 
506fe56b9e6SYuval Mintz 	hw_mode |= 1 << MODE_ASIC;
507fe56b9e6SYuval Mintz 
508fe56b9e6SYuval Mintz 	p_hwfn->hw_info.hw_mode = hw_mode;
509fe56b9e6SYuval Mintz }
510fe56b9e6SYuval Mintz 
511fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. */
512fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev)
513fe56b9e6SYuval Mintz {
514fe56b9e6SYuval Mintz 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
515fe56b9e6SYuval Mintz 	int i, sb_id;
516fe56b9e6SYuval Mintz 
517fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
518fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
519fe56b9e6SYuval Mintz 		struct qed_igu_info *p_igu_info;
520fe56b9e6SYuval Mintz 		struct qed_igu_block *p_block;
521fe56b9e6SYuval Mintz 		struct cau_sb_entry sb_entry;
522fe56b9e6SYuval Mintz 
523fe56b9e6SYuval Mintz 		p_igu_info = p_hwfn->hw_info.p_igu_info;
524fe56b9e6SYuval Mintz 
525fe56b9e6SYuval Mintz 		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
526fe56b9e6SYuval Mintz 		     sb_id++) {
527fe56b9e6SYuval Mintz 			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
528fe56b9e6SYuval Mintz 			if (!p_block->is_pf)
529fe56b9e6SYuval Mintz 				continue;
530fe56b9e6SYuval Mintz 
531fe56b9e6SYuval Mintz 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
532fe56b9e6SYuval Mintz 					      p_block->function_id,
533fe56b9e6SYuval Mintz 					      0, 0);
534fe56b9e6SYuval Mintz 			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
535fe56b9e6SYuval Mintz 					 sb_entry);
536fe56b9e6SYuval Mintz 		}
537fe56b9e6SYuval Mintz 	}
538fe56b9e6SYuval Mintz }
539fe56b9e6SYuval Mintz 
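/* Engine-level (common) init phase - run only by the first function loading on
 * the engine (FW_MSG_CODE_DRV_LOAD_ENGINE): CAU/QM common runtime init, GTT
 * windows, and closing the NIG gates on both ports until the PFs are started.
 */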
540fe56b9e6SYuval Mintz static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
541fe56b9e6SYuval Mintz 			      struct qed_ptt *p_ptt,
542fe56b9e6SYuval Mintz 			      int hw_mode)
543fe56b9e6SYuval Mintz {
544fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
545fe56b9e6SYuval Mintz 	struct qed_qm_common_rt_init_params params;
546fe56b9e6SYuval Mintz 	struct qed_dev *cdev = p_hwfn->cdev;
5471408cc1fSYuval Mintz 	u32 concrete_fid;
548fe56b9e6SYuval Mintz 	int rc = 0;
5491408cc1fSYuval Mintz 	u8 vf_id;
550fe56b9e6SYuval Mintz 
551fe56b9e6SYuval Mintz 	qed_init_cau_rt_data(cdev);
552fe56b9e6SYuval Mintz 
553fe56b9e6SYuval Mintz 	/* Program GTT windows */
554fe56b9e6SYuval Mintz 	qed_gtt_init(p_hwfn);
555fe56b9e6SYuval Mintz 
556fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
557fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.bandwidth_max)
558fe56b9e6SYuval Mintz 			qm_info->pf_rl_en = 1;
559fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.bandwidth_min)
560fe56b9e6SYuval Mintz 			qm_info->pf_wfq_en = 1;
561fe56b9e6SYuval Mintz 	}
562fe56b9e6SYuval Mintz 
563fe56b9e6SYuval Mintz 	memset(&params, 0, sizeof(params));
564fe56b9e6SYuval Mintz 	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
565fe56b9e6SYuval Mintz 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
566fe56b9e6SYuval Mintz 	params.pf_rl_en = qm_info->pf_rl_en;
567fe56b9e6SYuval Mintz 	params.pf_wfq_en = qm_info->pf_wfq_en;
568fe56b9e6SYuval Mintz 	params.vport_rl_en = qm_info->vport_rl_en;
569fe56b9e6SYuval Mintz 	params.vport_wfq_en = qm_info->vport_wfq_en;
570fe56b9e6SYuval Mintz 	params.port_params = qm_info->qm_port_params;
571fe56b9e6SYuval Mintz 
572fe56b9e6SYuval Mintz 	qed_qm_common_rt_init(p_hwfn, &params);
573fe56b9e6SYuval Mintz 
574fe56b9e6SYuval Mintz 	qed_cxt_hw_init_common(p_hwfn);
575fe56b9e6SYuval Mintz 
576fe56b9e6SYuval Mintz 	/* Close the gates from NIG to BRB/Storm; by default they are open, but
577fe56b9e6SYuval Mintz 	 * we close them to prevent NIG from passing data to reset blocks.
578fe56b9e6SYuval Mintz 	 * Should have been done in the ENGINE phase, but init-tool lacks
579fe56b9e6SYuval Mintz 	 * proper port-pretend capabilities.
580fe56b9e6SYuval Mintz 	 */
581fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
582fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
583fe56b9e6SYuval Mintz 	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
584fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
585fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
586fe56b9e6SYuval Mintz 	qed_port_unpretend(p_hwfn, p_ptt);
587fe56b9e6SYuval Mintz 
588fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
589fe56b9e6SYuval Mintz 	if (rc != 0)
590fe56b9e6SYuval Mintz 		return rc;
591fe56b9e6SYuval Mintz 
592fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
593fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
594fe56b9e6SYuval Mintz 
595fe56b9e6SYuval Mintz 	/* Disable relaxed ordering in the PCI config space */
596fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, 0x20b4,
597fe56b9e6SYuval Mintz 	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
598fe56b9e6SYuval Mintz 
5991408cc1fSYuval Mintz 	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
6001408cc1fSYuval Mintz 		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
6011408cc1fSYuval Mintz 		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
6021408cc1fSYuval Mintz 		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
6031408cc1fSYuval Mintz 	}
6041408cc1fSYuval Mintz 	/* pretend to original PF */
6051408cc1fSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
6061408cc1fSYuval Mintz 
607fe56b9e6SYuval Mintz 	return rc;
608fe56b9e6SYuval Mintz }
609fe56b9e6SYuval Mintz 
610fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
611fe56b9e6SYuval Mintz 			    struct qed_ptt *p_ptt,
612fe56b9e6SYuval Mintz 			    int hw_mode)
613fe56b9e6SYuval Mintz {
614fe56b9e6SYuval Mintz 	int rc = 0;
615fe56b9e6SYuval Mintz 
616fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
617fe56b9e6SYuval Mintz 			  hw_mode);
618fe56b9e6SYuval Mintz 	return rc;
619fe56b9e6SYuval Mintz }
620fe56b9e6SYuval Mintz 
621fe56b9e6SYuval Mintz static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
622fe56b9e6SYuval Mintz 			  struct qed_ptt *p_ptt,
623464f6645SManish Chopra 			  struct qed_tunn_start_params *p_tunn,
624fe56b9e6SYuval Mintz 			  int hw_mode,
625fe56b9e6SYuval Mintz 			  bool b_hw_start,
626fe56b9e6SYuval Mintz 			  enum qed_int_mode int_mode,
627fe56b9e6SYuval Mintz 			  bool allow_npar_tx_switch)
628fe56b9e6SYuval Mintz {
629fe56b9e6SYuval Mintz 	u8 rel_pf_id = p_hwfn->rel_pf_id;
630fe56b9e6SYuval Mintz 	int rc = 0;
631fe56b9e6SYuval Mintz 
632fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
633fe56b9e6SYuval Mintz 		struct qed_mcp_function_info *p_info;
634fe56b9e6SYuval Mintz 
635fe56b9e6SYuval Mintz 		p_info = &p_hwfn->mcp_info->func_info;
636fe56b9e6SYuval Mintz 		if (p_info->bandwidth_min)
637fe56b9e6SYuval Mintz 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
638fe56b9e6SYuval Mintz 
639fe56b9e6SYuval Mintz 		/* Update the rate limit once we actually have a link */
6404b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
641fe56b9e6SYuval Mintz 	}
642fe56b9e6SYuval Mintz 
643fe56b9e6SYuval Mintz 	qed_cxt_hw_init_pf(p_hwfn);
644fe56b9e6SYuval Mintz 
645fe56b9e6SYuval Mintz 	qed_int_igu_init_rt(p_hwfn);
646fe56b9e6SYuval Mintz 
647fe56b9e6SYuval Mintz 	/* Set VLAN in NIG if needed */
648fe56b9e6SYuval Mintz 	if (hw_mode & (1 << MODE_MF_SD)) {
649fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
650fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
651fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
652fe56b9e6SYuval Mintz 			     p_hwfn->hw_info.ovlan);
653fe56b9e6SYuval Mintz 	}
654fe56b9e6SYuval Mintz 
655fe56b9e6SYuval Mintz 	/* Enable classification by MAC if needed */
65687aec47dSDan Carpenter 	if (hw_mode & (1 << MODE_MF_SI)) {
657fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
658fe56b9e6SYuval Mintz 			   "Configuring TAGMAC_CLS_TYPE\n");
659fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn,
660fe56b9e6SYuval Mintz 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
661fe56b9e6SYuval Mintz 	}
662fe56b9e6SYuval Mintz 
663fe56b9e6SYuval Mintz 	/* Protocol Configuration */
664fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
665fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
666fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
667fe56b9e6SYuval Mintz 
668fe56b9e6SYuval Mintz 	/* Clean up the chip from a previous driver if such remains exist */
6690b55e27dSYuval Mintz 	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
670fe56b9e6SYuval Mintz 	if (rc != 0)
671fe56b9e6SYuval Mintz 		return rc;
672fe56b9e6SYuval Mintz 
673fe56b9e6SYuval Mintz 	/* PF Init sequence */
674fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
675fe56b9e6SYuval Mintz 	if (rc)
676fe56b9e6SYuval Mintz 		return rc;
677fe56b9e6SYuval Mintz 
678fe56b9e6SYuval Mintz 	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
679fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
680fe56b9e6SYuval Mintz 	if (rc)
681fe56b9e6SYuval Mintz 		return rc;
682fe56b9e6SYuval Mintz 
683fe56b9e6SYuval Mintz 	/* Pure runtime initializations - directly to the HW  */
684fe56b9e6SYuval Mintz 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
685fe56b9e6SYuval Mintz 
686fe56b9e6SYuval Mintz 	if (b_hw_start) {
687fe56b9e6SYuval Mintz 		/* enable interrupts */
688fe56b9e6SYuval Mintz 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
689fe56b9e6SYuval Mintz 
690fe56b9e6SYuval Mintz 		/* send function start command */
691464f6645SManish Chopra 		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
692fe56b9e6SYuval Mintz 		if (rc)
693fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
694fe56b9e6SYuval Mintz 	}
695fe56b9e6SYuval Mintz 	return rc;
696fe56b9e6SYuval Mintz }
697fe56b9e6SYuval Mintz 
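/* Enable/disable this PF as a master in PGLUE_B and poll (for up to ~1 second,
 * in 50us steps) until the new value is reflected back by the hardware.
 */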
698fe56b9e6SYuval Mintz static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
699fe56b9e6SYuval Mintz 			       struct qed_ptt *p_ptt,
700fe56b9e6SYuval Mintz 			       u8 enable)
701fe56b9e6SYuval Mintz {
702fe56b9e6SYuval Mintz 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
703fe56b9e6SYuval Mintz 
704fe56b9e6SYuval Mintz 	/* Change PF in PXP */
705fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt,
706fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
707fe56b9e6SYuval Mintz 
708fe56b9e6SYuval Mintz 	/* wait until value is set - try for 1 second every 50us */
709fe56b9e6SYuval Mintz 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
710fe56b9e6SYuval Mintz 		val = qed_rd(p_hwfn, p_ptt,
711fe56b9e6SYuval Mintz 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
712fe56b9e6SYuval Mintz 		if (val == set_val)
713fe56b9e6SYuval Mintz 			break;
714fe56b9e6SYuval Mintz 
715fe56b9e6SYuval Mintz 		usleep_range(50, 60);
716fe56b9e6SYuval Mintz 	}
717fe56b9e6SYuval Mintz 
718fe56b9e6SYuval Mintz 	if (val != set_val) {
719fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
720fe56b9e6SYuval Mintz 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
721fe56b9e6SYuval Mintz 		return -EAGAIN;
722fe56b9e6SYuval Mintz 	}
723fe56b9e6SYuval Mintz 
724fe56b9e6SYuval Mintz 	return 0;
725fe56b9e6SYuval Mintz }
726fe56b9e6SYuval Mintz 
727fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
728fe56b9e6SYuval Mintz 				struct qed_ptt *p_main_ptt)
729fe56b9e6SYuval Mintz {
730fe56b9e6SYuval Mintz 	/* Read shadow of current MFW mailbox */
731fe56b9e6SYuval Mintz 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
732fe56b9e6SYuval Mintz 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
733fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_cur,
734fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_length);
735fe56b9e6SYuval Mintz }
736fe56b9e6SYuval Mintz 
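/* Main HW init entry point. For each PF: enable DMAE in PXP, send LOAD_REQ to
 * the MFW, run the init phases dictated by the returned load code
 * (engine -> port -> function) and acknowledge with LOAD_DONE. VFs only mark
 * their interrupts as enabled here.
 */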
737fe56b9e6SYuval Mintz int qed_hw_init(struct qed_dev *cdev,
738464f6645SManish Chopra 		struct qed_tunn_start_params *p_tunn,
739fe56b9e6SYuval Mintz 		bool b_hw_start,
740fe56b9e6SYuval Mintz 		enum qed_int_mode int_mode,
741fe56b9e6SYuval Mintz 		bool allow_npar_tx_switch,
742fe56b9e6SYuval Mintz 		const u8 *bin_fw_data)
743fe56b9e6SYuval Mintz {
74486622ee7SYuval Mintz 	u32 load_code, param;
745fe56b9e6SYuval Mintz 	int rc, mfw_rc, i;
746fe56b9e6SYuval Mintz 
7471408cc1fSYuval Mintz 	if (IS_PF(cdev)) {
748fe56b9e6SYuval Mintz 		rc = qed_init_fw_data(cdev, bin_fw_data);
749fe56b9e6SYuval Mintz 		if (rc != 0)
750fe56b9e6SYuval Mintz 			return rc;
7511408cc1fSYuval Mintz 	}
752fe56b9e6SYuval Mintz 
753fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
754fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
755fe56b9e6SYuval Mintz 
7561408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
7571408cc1fSYuval Mintz 			p_hwfn->b_int_enabled = 1;
7581408cc1fSYuval Mintz 			continue;
7591408cc1fSYuval Mintz 		}
7601408cc1fSYuval Mintz 
761fe56b9e6SYuval Mintz 		/* Enable DMAE in PXP */
762fe56b9e6SYuval Mintz 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
763fe56b9e6SYuval Mintz 
764fe56b9e6SYuval Mintz 		qed_calc_hw_mode(p_hwfn);
765fe56b9e6SYuval Mintz 
766fe56b9e6SYuval Mintz 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
767fe56b9e6SYuval Mintz 				      &load_code);
768fe56b9e6SYuval Mintz 		if (rc) {
769fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
770fe56b9e6SYuval Mintz 			return rc;
771fe56b9e6SYuval Mintz 		}
772fe56b9e6SYuval Mintz 
773fe56b9e6SYuval Mintz 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
774fe56b9e6SYuval Mintz 
775fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
776fe56b9e6SYuval Mintz 			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
777fe56b9e6SYuval Mintz 			   rc, load_code);
778fe56b9e6SYuval Mintz 
779fe56b9e6SYuval Mintz 		p_hwfn->first_on_engine = (load_code ==
780fe56b9e6SYuval Mintz 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
781fe56b9e6SYuval Mintz 
782fe56b9e6SYuval Mintz 		switch (load_code) {
783fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
784fe56b9e6SYuval Mintz 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
785fe56b9e6SYuval Mintz 						p_hwfn->hw_info.hw_mode);
786fe56b9e6SYuval Mintz 			if (rc)
787fe56b9e6SYuval Mintz 				break;
788fe56b9e6SYuval Mintz 		/* Fall into */
789fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_PORT:
790fe56b9e6SYuval Mintz 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
791fe56b9e6SYuval Mintz 					      p_hwfn->hw_info.hw_mode);
792fe56b9e6SYuval Mintz 			if (rc)
793fe56b9e6SYuval Mintz 				break;
794fe56b9e6SYuval Mintz 
795fe56b9e6SYuval Mintz 		/* Fall into */
796fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
797fe56b9e6SYuval Mintz 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
798464f6645SManish Chopra 					    p_tunn, p_hwfn->hw_info.hw_mode,
799fe56b9e6SYuval Mintz 					    b_hw_start, int_mode,
800fe56b9e6SYuval Mintz 					    allow_npar_tx_switch);
801fe56b9e6SYuval Mintz 			break;
802fe56b9e6SYuval Mintz 		default:
803fe56b9e6SYuval Mintz 			rc = -EINVAL;
804fe56b9e6SYuval Mintz 			break;
805fe56b9e6SYuval Mintz 		}
806fe56b9e6SYuval Mintz 
807fe56b9e6SYuval Mintz 		if (rc)
808fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
809fe56b9e6SYuval Mintz 				  "init phase failed for loadcode 0x%x (rc %d)\n",
810fe56b9e6SYuval Mintz 				   load_code, rc);
811fe56b9e6SYuval Mintz 
812fe56b9e6SYuval Mintz 		/* ACK mfw regardless of success or failure of initialization */
813fe56b9e6SYuval Mintz 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
814fe56b9e6SYuval Mintz 				     DRV_MSG_CODE_LOAD_DONE,
815fe56b9e6SYuval Mintz 				     0, &load_code, &param);
816fe56b9e6SYuval Mintz 		if (rc)
817fe56b9e6SYuval Mintz 			return rc;
818fe56b9e6SYuval Mintz 		if (mfw_rc) {
819fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
820fe56b9e6SYuval Mintz 			return mfw_rc;
821fe56b9e6SYuval Mintz 		}
822fe56b9e6SYuval Mintz 
823fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = true;
824fe56b9e6SYuval Mintz 	}
825fe56b9e6SYuval Mintz 
826fe56b9e6SYuval Mintz 	return 0;
827fe56b9e6SYuval Mintz }
828fe56b9e6SYuval Mintz 
829fe56b9e6SYuval Mintz #define QED_HW_STOP_RETRY_LIMIT (10)
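/* Close the PF's connection/task timers and wait (up to QED_HW_STOP_RETRY_LIMIT
 * polls, ~1ms apart) for the timers' linear scans to complete.
 */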
8308c925c44SYuval Mintz static inline void qed_hw_timers_stop(struct qed_dev *cdev,
8318c925c44SYuval Mintz 				      struct qed_hwfn *p_hwfn,
8328c925c44SYuval Mintz 				      struct qed_ptt *p_ptt)
8338c925c44SYuval Mintz {
8348c925c44SYuval Mintz 	int i;
8358c925c44SYuval Mintz 
8368c925c44SYuval Mintz 	/* close timers */
8378c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
8388c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
8398c925c44SYuval Mintz 
8408c925c44SYuval Mintz 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
8418c925c44SYuval Mintz 		if ((!qed_rd(p_hwfn, p_ptt,
8428c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
8438c925c44SYuval Mintz 		    (!qed_rd(p_hwfn, p_ptt,
8448c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_TASK)))
8458c925c44SYuval Mintz 			break;
8468c925c44SYuval Mintz 
8478c925c44SYuval Mintz 		/* Depending on the number of connections/tasks, a 1ms
8488c925c44SYuval Mintz 		 * sleep between polls may be required
8498c925c44SYuval Mintz 		 */
8508c925c44SYuval Mintz 		usleep_range(1000, 2000);
8518c925c44SYuval Mintz 	}
8528c925c44SYuval Mintz 
8538c925c44SYuval Mintz 	if (i < QED_HW_STOP_RETRY_LIMIT)
8548c925c44SYuval Mintz 		return;
8558c925c44SYuval Mintz 
8568c925c44SYuval Mintz 	DP_NOTICE(p_hwfn,
8578c925c44SYuval Mintz 		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
8588c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
8598c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
8608c925c44SYuval Mintz }
8618c925c44SYuval Mintz 
8628c925c44SYuval Mintz void qed_hw_timers_stop_all(struct qed_dev *cdev)
8638c925c44SYuval Mintz {
8648c925c44SYuval Mintz 	int j;
8658c925c44SYuval Mintz 
8668c925c44SYuval Mintz 	for_each_hwfn(cdev, j) {
8678c925c44SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
8688c925c44SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
8698c925c44SYuval Mintz 
8708c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
8718c925c44SYuval Mintz 	}
8728c925c44SYuval Mintz }
8738c925c44SYuval Mintz 
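/* Orderly FW/HW stop for all hw-functions: send the PF-stop ramrod, gate
 * traffic in NIG/PRS, stop the timers and IGU, and finally (PF only) disable
 * the DMAE master on the first hw-function once all functions are quiesced.
 */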
874fe56b9e6SYuval Mintz int qed_hw_stop(struct qed_dev *cdev)
875fe56b9e6SYuval Mintz {
876fe56b9e6SYuval Mintz 	int rc = 0, t_rc;
8778c925c44SYuval Mintz 	int j;
878fe56b9e6SYuval Mintz 
879fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, j) {
880fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
881fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
882fe56b9e6SYuval Mintz 
883fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
884fe56b9e6SYuval Mintz 
8851408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
8860b55e27dSYuval Mintz 			qed_vf_pf_int_cleanup(p_hwfn);
8871408cc1fSYuval Mintz 			continue;
8881408cc1fSYuval Mintz 		}
8891408cc1fSYuval Mintz 
890fe56b9e6SYuval Mintz 		/* mark the hw as uninitialized... */
891fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = false;
892fe56b9e6SYuval Mintz 
893fe56b9e6SYuval Mintz 		rc = qed_sp_pf_stop(p_hwfn);
894fe56b9e6SYuval Mintz 		if (rc)
8958c925c44SYuval Mintz 			DP_NOTICE(p_hwfn,
8968c925c44SYuval Mintz 				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
897fe56b9e6SYuval Mintz 
898fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt,
899fe56b9e6SYuval Mintz 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
900fe56b9e6SYuval Mintz 
901fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
902fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
903fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
904fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
905fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
906fe56b9e6SYuval Mintz 
9078c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
908fe56b9e6SYuval Mintz 
909fe56b9e6SYuval Mintz 		/* Disable Attention Generation */
910fe56b9e6SYuval Mintz 		qed_int_igu_disable_int(p_hwfn, p_ptt);
911fe56b9e6SYuval Mintz 
912fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
913fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
914fe56b9e6SYuval Mintz 
915fe56b9e6SYuval Mintz 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
916fe56b9e6SYuval Mintz 
917fe56b9e6SYuval Mintz 		/* Need to wait 1ms to guarantee SBs are cleared */
918fe56b9e6SYuval Mintz 		usleep_range(1000, 2000);
919fe56b9e6SYuval Mintz 	}
920fe56b9e6SYuval Mintz 
9211408cc1fSYuval Mintz 	if (IS_PF(cdev)) {
922fe56b9e6SYuval Mintz 		/* Disable DMAE in PXP - in CMT, this should only be done for
923fe56b9e6SYuval Mintz 		 * first hw-function, and only after all transactions have
924fe56b9e6SYuval Mintz 		 * stopped for all active hw-functions.
925fe56b9e6SYuval Mintz 		 */
926fe56b9e6SYuval Mintz 		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
9271408cc1fSYuval Mintz 					   cdev->hwfns[0].p_main_ptt, false);
928fe56b9e6SYuval Mintz 		if (t_rc != 0)
929fe56b9e6SYuval Mintz 			rc = t_rc;
9301408cc1fSYuval Mintz 	}
931fe56b9e6SYuval Mintz 
932fe56b9e6SYuval Mintz 	return rc;
933fe56b9e6SYuval Mintz }
934fe56b9e6SYuval Mintz 
935cee4d264SManish Chopra void qed_hw_stop_fastpath(struct qed_dev *cdev)
936cee4d264SManish Chopra {
9378c925c44SYuval Mintz 	int j;
938cee4d264SManish Chopra 
939cee4d264SManish Chopra 	for_each_hwfn(cdev, j) {
940cee4d264SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
941cee4d264SManish Chopra 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
942cee4d264SManish Chopra 
943dacd88d6SYuval Mintz 		if (IS_VF(cdev)) {
944dacd88d6SYuval Mintz 			qed_vf_pf_int_cleanup(p_hwfn);
945dacd88d6SYuval Mintz 			continue;
946dacd88d6SYuval Mintz 		}
947dacd88d6SYuval Mintz 
948cee4d264SManish Chopra 		DP_VERBOSE(p_hwfn,
949cee4d264SManish Chopra 			   NETIF_MSG_IFDOWN,
950cee4d264SManish Chopra 			   "Shutting down the fastpath\n");
951cee4d264SManish Chopra 
952cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt,
953cee4d264SManish Chopra 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
954cee4d264SManish Chopra 
955cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
956cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
957cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
958cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
959cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
960cee4d264SManish Chopra 
961cee4d264SManish Chopra 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
962cee4d264SManish Chopra 
963cee4d264SManish Chopra 		/* Need to wait 1ms to guarantee SBs are cleared */
964cee4d264SManish Chopra 		usleep_range(1000, 2000);
965cee4d264SManish Chopra 	}
966cee4d264SManish Chopra }
967cee4d264SManish Chopra 
968cee4d264SManish Chopra void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
969cee4d264SManish Chopra {
970dacd88d6SYuval Mintz 	if (IS_VF(p_hwfn->cdev))
971dacd88d6SYuval Mintz 		return;
972dacd88d6SYuval Mintz 
973cee4d264SManish Chopra 	/* Re-open incoming traffic */
974cee4d264SManish Chopra 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
975cee4d264SManish Chopra 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
976cee4d264SManish Chopra }
977cee4d264SManish Chopra 
978fe56b9e6SYuval Mintz static int qed_reg_assert(struct qed_hwfn *hwfn,
979fe56b9e6SYuval Mintz 			  struct qed_ptt *ptt, u32 reg,
980fe56b9e6SYuval Mintz 			  bool expected)
981fe56b9e6SYuval Mintz {
982fe56b9e6SYuval Mintz 	u32 assert_val = qed_rd(hwfn, ptt, reg);
983fe56b9e6SYuval Mintz 
984fe56b9e6SYuval Mintz 	if (assert_val != expected) {
985fe56b9e6SYuval Mintz 		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
986fe56b9e6SYuval Mintz 			  reg, expected);
987fe56b9e6SYuval Mintz 		return -EINVAL;
988fe56b9e6SYuval Mintz 	}
989fe56b9e6SYuval Mintz 
990fe56b9e6SYuval Mintz 	return 0;
991fe56b9e6SYuval Mintz }
992fe56b9e6SYuval Mintz 
993fe56b9e6SYuval Mintz int qed_hw_reset(struct qed_dev *cdev)
994fe56b9e6SYuval Mintz {
995fe56b9e6SYuval Mintz 	int rc = 0;
996fe56b9e6SYuval Mintz 	u32 unload_resp, unload_param;
997fe56b9e6SYuval Mintz 	int i;
998fe56b9e6SYuval Mintz 
999fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1000fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1001fe56b9e6SYuval Mintz 
10021408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
10030b55e27dSYuval Mintz 			rc = qed_vf_pf_reset(p_hwfn);
10040b55e27dSYuval Mintz 			if (rc)
10050b55e27dSYuval Mintz 				return rc;
10061408cc1fSYuval Mintz 			continue;
10071408cc1fSYuval Mintz 		}
10081408cc1fSYuval Mintz 
1009fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
1010fe56b9e6SYuval Mintz 
1011fe56b9e6SYuval Mintz 		/* Check for incorrect states */
1012fe56b9e6SYuval Mintz 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1013fe56b9e6SYuval Mintz 			       QM_REG_USG_CNT_PF_TX, 0);
1014fe56b9e6SYuval Mintz 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1015fe56b9e6SYuval Mintz 			       QM_REG_USG_CNT_PF_OTHER, 0);
1016fe56b9e6SYuval Mintz 
1017fe56b9e6SYuval Mintz 		/* Disable PF in HW blocks */
1018fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
1019fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
1020fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1021fe56b9e6SYuval Mintz 		       TCFC_REG_STRONG_ENABLE_PF, 0);
1022fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1023fe56b9e6SYuval Mintz 		       CCFC_REG_STRONG_ENABLE_PF, 0);
1024fe56b9e6SYuval Mintz 
1025fe56b9e6SYuval Mintz 		/* Send unload command to MCP */
1026fe56b9e6SYuval Mintz 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1027fe56b9e6SYuval Mintz 				 DRV_MSG_CODE_UNLOAD_REQ,
1028fe56b9e6SYuval Mintz 				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
1029fe56b9e6SYuval Mintz 				 &unload_resp, &unload_param);
1030fe56b9e6SYuval Mintz 		if (rc) {
1031fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
1032fe56b9e6SYuval Mintz 			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
1033fe56b9e6SYuval Mintz 		}
1034fe56b9e6SYuval Mintz 
1035fe56b9e6SYuval Mintz 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1036fe56b9e6SYuval Mintz 				 DRV_MSG_CODE_UNLOAD_DONE,
1037fe56b9e6SYuval Mintz 				 0, &unload_resp, &unload_param);
1038fe56b9e6SYuval Mintz 		if (rc) {
1039fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
1040fe56b9e6SYuval Mintz 			return rc;
1041fe56b9e6SYuval Mintz 		}
1042fe56b9e6SYuval Mintz 	}
1043fe56b9e6SYuval Mintz 
1044fe56b9e6SYuval Mintz 	return rc;
1045fe56b9e6SYuval Mintz }
1046fe56b9e6SYuval Mintz 
1047fe56b9e6SYuval Mintz /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
1048fe56b9e6SYuval Mintz static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
1049fe56b9e6SYuval Mintz {
1050fe56b9e6SYuval Mintz 	qed_ptt_pool_free(p_hwfn);
1051fe56b9e6SYuval Mintz 	kfree(p_hwfn->hw_info.p_igu_info);
1052fe56b9e6SYuval Mintz }
1053fe56b9e6SYuval Mintz 
1054fe56b9e6SYuval Mintz /* Setup bar access */
105512e09c69SYuval Mintz static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
1056fe56b9e6SYuval Mintz {
1057fe56b9e6SYuval Mintz 	/* clear indirect access */
1058fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
1059fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
1060fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
1061fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
1062fe56b9e6SYuval Mintz 
1063fe56b9e6SYuval Mintz 	/* Clean Previous errors if such exist */
1064fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1065fe56b9e6SYuval Mintz 	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
1066fe56b9e6SYuval Mintz 	       1 << p_hwfn->abs_pf_id);
1067fe56b9e6SYuval Mintz 
1068fe56b9e6SYuval Mintz 	/* enable internal target-read */
1069fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1070fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1071fe56b9e6SYuval Mintz }
1072fe56b9e6SYuval Mintz 
1073fe56b9e6SYuval Mintz static void get_function_id(struct qed_hwfn *p_hwfn)
1074fe56b9e6SYuval Mintz {
1075fe56b9e6SYuval Mintz 	/* ME Register */
1076fe56b9e6SYuval Mintz 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
1077fe56b9e6SYuval Mintz 
1078fe56b9e6SYuval Mintz 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1079fe56b9e6SYuval Mintz 
1080fe56b9e6SYuval Mintz 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1081fe56b9e6SYuval Mintz 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1082fe56b9e6SYuval Mintz 				      PXP_CONCRETE_FID_PFID);
1083fe56b9e6SYuval Mintz 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1084fe56b9e6SYuval Mintz 				    PXP_CONCRETE_FID_PORT);
1085fe56b9e6SYuval Mintz }
1086fe56b9e6SYuval Mintz 
108725c089d7SYuval Mintz static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
108825c089d7SYuval Mintz {
108925c089d7SYuval Mintz 	u32 *feat_num = p_hwfn->hw_info.feat_num;
109025c089d7SYuval Mintz 	int num_features = 1;
109125c089d7SYuval Mintz 
109225c089d7SYuval Mintz 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
109325c089d7SYuval Mintz 						num_features,
109425c089d7SYuval Mintz 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
109525c089d7SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
109625c089d7SYuval Mintz 		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
109725c089d7SYuval Mintz 		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
109825c089d7SYuval Mintz 		   num_features);
109925c089d7SYuval Mintz }
110025c089d7SYuval Mintz 
1101fe56b9e6SYuval Mintz static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1102fe56b9e6SYuval Mintz {
1103fe56b9e6SYuval Mintz 	u32 *resc_start = p_hwfn->hw_info.resc_start;
11041408cc1fSYuval Mintz 	u8 num_funcs = p_hwfn->num_funcs_on_engine;
1105fe56b9e6SYuval Mintz 	u32 *resc_num = p_hwfn->hw_info.resc_num;
11064ac801b7SYuval Mintz 	struct qed_sb_cnt_info sb_cnt_info;
11071408cc1fSYuval Mintz 	int i;
1108fe56b9e6SYuval Mintz 
11094ac801b7SYuval Mintz 	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
11104ac801b7SYuval Mintz 	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
11114ac801b7SYuval Mintz 
1112fe56b9e6SYuval Mintz 	resc_num[QED_SB] = min_t(u32,
1113fe56b9e6SYuval Mintz 				 (MAX_SB_PER_PATH_BB / num_funcs),
11144ac801b7SYuval Mintz 				 sb_cnt_info.sb_cnt);
111525c089d7SYuval Mintz 	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
1116fe56b9e6SYuval Mintz 	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
111725c089d7SYuval Mintz 	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1118fe56b9e6SYuval Mintz 	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1119fe56b9e6SYuval Mintz 	resc_num[QED_RL] = 8;
112025c089d7SYuval Mintz 	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
112125c089d7SYuval Mintz 	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
112225c089d7SYuval Mintz 			     num_funcs;
1123fe56b9e6SYuval Mintz 	resc_num[QED_ILT] = 950;
1124fe56b9e6SYuval Mintz 
1125fe56b9e6SYuval Mintz 	for (i = 0; i < QED_MAX_RESC; i++)
1126fe56b9e6SYuval Mintz 		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1127fe56b9e6SYuval Mintz 
112825c089d7SYuval Mintz 	qed_hw_set_feat(p_hwfn);
112925c089d7SYuval Mintz 
1130fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1131fe56b9e6SYuval Mintz 		   "The numbers for each resource are:\n"
1132fe56b9e6SYuval Mintz 		   "SB = %d start = %d\n"
113325c089d7SYuval Mintz 		   "L2_QUEUE = %d start = %d\n"
1134fe56b9e6SYuval Mintz 		   "VPORT = %d start = %d\n"
1135fe56b9e6SYuval Mintz 		   "PQ = %d start = %d\n"
1136fe56b9e6SYuval Mintz 		   "RL = %d start = %d\n"
113725c089d7SYuval Mintz 		   "MAC = %d start = %d\n"
113825c089d7SYuval Mintz 		   "VLAN = %d start = %d\n"
1139fe56b9e6SYuval Mintz 		   "ILT = %d start = %d\n",
1140fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_SB],
1141fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_SB],
114225c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
114325c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
1144fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_VPORT],
1145fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_VPORT],
1146fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_PQ],
1147fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_PQ],
1148fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_RL],
1149fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_RL],
115025c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_MAC],
115125c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_MAC],
115225c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_VLAN],
115325c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_VLAN],
1154fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_ILT],
1155fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_ILT]);
1156fe56b9e6SYuval Mintz }
1157fe56b9e6SYuval Mintz 
1158fe56b9e6SYuval Mintz static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1159fe56b9e6SYuval Mintz 			       struct qed_ptt *p_ptt)
1160fe56b9e6SYuval Mintz {
1161cc875c2eSYuval Mintz 	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1162fc48b7a6SYuval Mintz 	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
1163cc875c2eSYuval Mintz 	struct qed_mcp_link_params *link;
1164fe56b9e6SYuval Mintz 
1165fe56b9e6SYuval Mintz 	/* Read global nvm_cfg address */
1166fe56b9e6SYuval Mintz 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1167fe56b9e6SYuval Mintz 
1168fe56b9e6SYuval Mintz 	/* Verify MCP has initialized it */
1169fe56b9e6SYuval Mintz 	if (!nvm_cfg_addr) {
1170fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1171fe56b9e6SYuval Mintz 		return -EINVAL;
1172fe56b9e6SYuval Mintz 	}
1173fe56b9e6SYuval Mintz 
1174fe56b9e6SYuval Mintz 	/* Read nvm_cfg1 (note this is only the offset, not the offsize (TBD)) */
1175fe56b9e6SYuval Mintz 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1176fe56b9e6SYuval Mintz 
1177cc875c2eSYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1178cc875c2eSYuval Mintz 	       offsetof(struct nvm_cfg1, glob) +
1179cc875c2eSYuval Mintz 	       offsetof(struct nvm_cfg1_glob, core_cfg);
1180cc875c2eSYuval Mintz 
1181cc875c2eSYuval Mintz 	core_cfg = qed_rd(p_hwfn, p_ptt, addr);
1182cc875c2eSYuval Mintz 
1183cc875c2eSYuval Mintz 	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1184cc875c2eSYuval Mintz 		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1185cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1186cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1187cc875c2eSYuval Mintz 		break;
1188cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1189cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1190cc875c2eSYuval Mintz 		break;
1191cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1192cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1193cc875c2eSYuval Mintz 		break;
1194cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1195cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1196cc875c2eSYuval Mintz 		break;
1197cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1198cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1199cc875c2eSYuval Mintz 		break;
1200cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1201cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1202cc875c2eSYuval Mintz 		break;
1203cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1204cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1205cc875c2eSYuval Mintz 		break;
1206cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1207cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1208cc875c2eSYuval Mintz 		break;
1209cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1210cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1211cc875c2eSYuval Mintz 		break;
1212cc875c2eSYuval Mintz 	default:
1213cc875c2eSYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
1214cc875c2eSYuval Mintz 			  core_cfg);
1215cc875c2eSYuval Mintz 		break;
1216cc875c2eSYuval Mintz 	}
1217cc875c2eSYuval Mintz 
1218cc875c2eSYuval Mintz 	/* Read default link configuration */
1219cc875c2eSYuval Mintz 	link = &p_hwfn->mcp_info->link_input;
1220cc875c2eSYuval Mintz 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1221cc875c2eSYuval Mintz 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1222cc875c2eSYuval Mintz 	link_temp = qed_rd(p_hwfn, p_ptt,
1223cc875c2eSYuval Mintz 			   port_cfg_addr +
1224cc875c2eSYuval Mintz 			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
1225cc875c2eSYuval Mintz 	link->speed.advertised_speeds =
1226cc875c2eSYuval Mintz 		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1227cc875c2eSYuval Mintz 
1228cc875c2eSYuval Mintz 	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
1229cc875c2eSYuval Mintz 						link->speed.advertised_speeds;
1230cc875c2eSYuval Mintz 
1231cc875c2eSYuval Mintz 	link_temp = qed_rd(p_hwfn, p_ptt,
1232cc875c2eSYuval Mintz 			   port_cfg_addr +
1233cc875c2eSYuval Mintz 			   offsetof(struct nvm_cfg1_port, link_settings));
1234cc875c2eSYuval Mintz 	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1235cc875c2eSYuval Mintz 		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1236cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1237cc875c2eSYuval Mintz 		link->speed.autoneg = true;
1238cc875c2eSYuval Mintz 		break;
1239cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1240cc875c2eSYuval Mintz 		link->speed.forced_speed = 1000;
1241cc875c2eSYuval Mintz 		break;
1242cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1243cc875c2eSYuval Mintz 		link->speed.forced_speed = 10000;
1244cc875c2eSYuval Mintz 		break;
1245cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1246cc875c2eSYuval Mintz 		link->speed.forced_speed = 25000;
1247cc875c2eSYuval Mintz 		break;
1248cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1249cc875c2eSYuval Mintz 		link->speed.forced_speed = 40000;
1250cc875c2eSYuval Mintz 		break;
1251cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1252cc875c2eSYuval Mintz 		link->speed.forced_speed = 50000;
1253cc875c2eSYuval Mintz 		break;
1254cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1255cc875c2eSYuval Mintz 		link->speed.forced_speed = 100000;
1256cc875c2eSYuval Mintz 		break;
1257cc875c2eSYuval Mintz 	default:
1258cc875c2eSYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
1259cc875c2eSYuval Mintz 			  link_temp);
1260cc875c2eSYuval Mintz 	}
1261cc875c2eSYuval Mintz 
1262cc875c2eSYuval Mintz 	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1263cc875c2eSYuval Mintz 	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1264cc875c2eSYuval Mintz 	link->pause.autoneg = !!(link_temp &
1265cc875c2eSYuval Mintz 				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1266cc875c2eSYuval Mintz 	link->pause.forced_rx = !!(link_temp &
1267cc875c2eSYuval Mintz 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1268cc875c2eSYuval Mintz 	link->pause.forced_tx = !!(link_temp &
1269cc875c2eSYuval Mintz 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1270cc875c2eSYuval Mintz 	link->loopback_mode = 0;
1271cc875c2eSYuval Mintz 
1272cc875c2eSYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1273cc875c2eSYuval Mintz 		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
1274cc875c2eSYuval Mintz 		   link->speed.forced_speed, link->speed.advertised_speeds,
1275cc875c2eSYuval Mintz 		   link->speed.autoneg, link->pause.autoneg);
1276cc875c2eSYuval Mintz 
1277fe56b9e6SYuval Mintz 	/* Read Multi-function information from shmem */
1278fe56b9e6SYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1279fe56b9e6SYuval Mintz 	       offsetof(struct nvm_cfg1, glob) +
1280fe56b9e6SYuval Mintz 	       offsetof(struct nvm_cfg1_glob, generic_cont0);
1281fe56b9e6SYuval Mintz 
1282fe56b9e6SYuval Mintz 	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
1283fe56b9e6SYuval Mintz 
1284fe56b9e6SYuval Mintz 	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1285fe56b9e6SYuval Mintz 		  NVM_CFG1_GLOB_MF_MODE_OFFSET;
1286fe56b9e6SYuval Mintz 
1287fe56b9e6SYuval Mintz 	switch (mf_mode) {
1288fe56b9e6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
1289fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
1290fe56b9e6SYuval Mintz 		break;
1291fe56b9e6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
1292fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
1293fe56b9e6SYuval Mintz 		break;
1294fc48b7a6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1295fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
1296fe56b9e6SYuval Mintz 		break;
1297fe56b9e6SYuval Mintz 	}
1298fe56b9e6SYuval Mintz 	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1299fe56b9e6SYuval Mintz 		p_hwfn->cdev->mf_mode);
1300fe56b9e6SYuval Mintz 
1301fc48b7a6SYuval Mintz 	/* Read device capabilities information from shmem */
1302fc48b7a6SYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1303fc48b7a6SYuval Mintz 		offsetof(struct nvm_cfg1, glob) +
1304fc48b7a6SYuval Mintz 		offsetof(struct nvm_cfg1_glob, device_capabilities);
1305fc48b7a6SYuval Mintz 
1306fc48b7a6SYuval Mintz 	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
1307fc48b7a6SYuval Mintz 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1308fc48b7a6SYuval Mintz 		__set_bit(QED_DEV_CAP_ETH,
1309fc48b7a6SYuval Mintz 			  &p_hwfn->hw_info.device_capabilities);
1310fc48b7a6SYuval Mintz 
1311fe56b9e6SYuval Mintz 	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1312fe56b9e6SYuval Mintz }
1313fe56b9e6SYuval Mintz 
13141408cc1fSYuval Mintz static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
13151408cc1fSYuval Mintz {
13161408cc1fSYuval Mintz 	u32 reg_function_hide, tmp, eng_mask;
13171408cc1fSYuval Mintz 	u8 num_funcs;
13181408cc1fSYuval Mintz 
13191408cc1fSYuval Mintz 	num_funcs = MAX_NUM_PFS_BB;
13201408cc1fSYuval Mintz 
13211408cc1fSYuval Mintz 	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
13221408cc1fSYuval Mintz 	 * in the other bits are selected.
13231408cc1fSYuval Mintz 	 * Bits 1-15 are for functions 1-15, respectively, and their value is
13241408cc1fSYuval Mintz 	 * '0' only for enabled functions (function 0 always exists and is
13251408cc1fSYuval Mintz 	 * enabled).
13261408cc1fSYuval Mintz 	 * In case of CMT, only the "even" functions are enabled, and thus the
13271408cc1fSYuval Mintz 	 * number of functions for both hwfns is learnt from the same bits.
13281408cc1fSYuval Mintz 	 */
13291408cc1fSYuval Mintz 	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
13301408cc1fSYuval Mintz 
13311408cc1fSYuval Mintz 	if (reg_function_hide & 0x1) {
13321408cc1fSYuval Mintz 		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
13331408cc1fSYuval Mintz 			num_funcs = 0;
13341408cc1fSYuval Mintz 			eng_mask = 0xaaaa;
13351408cc1fSYuval Mintz 		} else {
13361408cc1fSYuval Mintz 			num_funcs = 1;
13371408cc1fSYuval Mintz 			eng_mask = 0x5554;
13381408cc1fSYuval Mintz 		}
13391408cc1fSYuval Mintz 
13401408cc1fSYuval Mintz 		/* Get the number of the enabled functions on the engine */
13411408cc1fSYuval Mintz 		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
13421408cc1fSYuval Mintz 		while (tmp) {
13431408cc1fSYuval Mintz 			if (tmp & 0x1)
13441408cc1fSYuval Mintz 				num_funcs++;
13451408cc1fSYuval Mintz 			tmp >>= 0x1;
13461408cc1fSYuval Mintz 		}
13471408cc1fSYuval Mintz 	}
13481408cc1fSYuval Mintz 
13491408cc1fSYuval Mintz 	p_hwfn->num_funcs_on_engine = num_funcs;
13501408cc1fSYuval Mintz 
13511408cc1fSYuval Mintz 	DP_VERBOSE(p_hwfn,
13521408cc1fSYuval Mintz 		   NETIF_MSG_PROBE,
13531408cc1fSYuval Mintz 		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
13541408cc1fSYuval Mintz 		   p_hwfn->rel_pf_id,
13551408cc1fSYuval Mintz 		   p_hwfn->abs_pf_id,
13561408cc1fSYuval Mintz 		   p_hwfn->num_funcs_on_engine);
13571408cc1fSYuval Mintz }
13581408cc1fSYuval Mintz 
1359fe56b9e6SYuval Mintz static int
1360fe56b9e6SYuval Mintz qed_get_hw_info(struct qed_hwfn *p_hwfn,
1361fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt,
1362fe56b9e6SYuval Mintz 		enum qed_pci_personality personality)
1363fe56b9e6SYuval Mintz {
1364fe56b9e6SYuval Mintz 	u32 port_mode;
1365fe56b9e6SYuval Mintz 	int rc;
1366fe56b9e6SYuval Mintz 
136732a47e72SYuval Mintz 	/* The information is common; only the leading hwfn should do this */
136832a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn)) {
136932a47e72SYuval Mintz 		rc = qed_iov_hw_info(p_hwfn);
137032a47e72SYuval Mintz 		if (rc)
137132a47e72SYuval Mintz 			return rc;
137232a47e72SYuval Mintz 	}
137332a47e72SYuval Mintz 
1374fe56b9e6SYuval Mintz 	/* Read the port mode */
1375fe56b9e6SYuval Mintz 	port_mode = qed_rd(p_hwfn, p_ptt,
1376fe56b9e6SYuval Mintz 			   CNIG_REG_NW_PORT_MODE_BB_B0);
1377fe56b9e6SYuval Mintz 
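	/* Port modes 0-2 mean a single port per engine, modes 3-5 mean two
	 * ports per engine; anything else is treated as unsupported below.
	 */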
1378fe56b9e6SYuval Mintz 	if (port_mode < 3) {
1379fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1380fe56b9e6SYuval Mintz 	} else if (port_mode <= 5) {
1381fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 2;
1382fe56b9e6SYuval Mintz 	} else {
1383fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1384fe56b9e6SYuval Mintz 			  port_mode);
1385fe56b9e6SYuval Mintz 
1386fe56b9e6SYuval Mintz 		/* Default num_ports_in_engines to something */
1387fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1388fe56b9e6SYuval Mintz 	}
1389fe56b9e6SYuval Mintz 
1390fe56b9e6SYuval Mintz 	rc = qed_hw_get_nvm_info(p_hwfn, p_ptt);
	if (rc)
		return rc;
1391fe56b9e6SYuval Mintz 
1392fe56b9e6SYuval Mintz 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1393fe56b9e6SYuval Mintz 	if (rc)
1394fe56b9e6SYuval Mintz 		return rc;
1395fe56b9e6SYuval Mintz 
1396fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn))
1397fe56b9e6SYuval Mintz 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1398fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.mac);
1399fe56b9e6SYuval Mintz 	else
1400fe56b9e6SYuval Mintz 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1401fe56b9e6SYuval Mintz 
1402fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1403fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1404fe56b9e6SYuval Mintz 			p_hwfn->hw_info.ovlan =
1405fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.ovlan;
1406fe56b9e6SYuval Mintz 
1407fe56b9e6SYuval Mintz 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1408fe56b9e6SYuval Mintz 	}
1409fe56b9e6SYuval Mintz 
1410fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1411fe56b9e6SYuval Mintz 		enum qed_pci_personality protocol;
1412fe56b9e6SYuval Mintz 
1413fe56b9e6SYuval Mintz 		protocol = p_hwfn->mcp_info->func_info.protocol;
1414fe56b9e6SYuval Mintz 		p_hwfn->hw_info.personality = protocol;
1415fe56b9e6SYuval Mintz 	}
1416fe56b9e6SYuval Mintz 
14171408cc1fSYuval Mintz 	qed_get_num_funcs(p_hwfn, p_ptt);
14181408cc1fSYuval Mintz 
1419fe56b9e6SYuval Mintz 	qed_hw_get_resc(p_hwfn);
1420fe56b9e6SYuval Mintz 
1421fe56b9e6SYuval Mintz 	return rc;
1422fe56b9e6SYuval Mintz }
1423fe56b9e6SYuval Mintz 
142412e09c69SYuval Mintz static int qed_get_dev_info(struct qed_dev *cdev)
1425fe56b9e6SYuval Mintz {
1426fc48b7a6SYuval Mintz 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1427fe56b9e6SYuval Mintz 	u32 tmp;
1428fe56b9e6SYuval Mintz 
1429fc48b7a6SYuval Mintz 	/* Read Vendor Id / Device Id */
1430fc48b7a6SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1431fc48b7a6SYuval Mintz 			     &cdev->vendor_id);
1432fc48b7a6SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1433fc48b7a6SYuval Mintz 			     &cdev->device_id);
1434fc48b7a6SYuval Mintz 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1435fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_NUM);
1436fc48b7a6SYuval Mintz 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1437fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_REV);
1438fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
1439fe56b9e6SYuval Mintz 
1440fc48b7a6SYuval Mintz 	cdev->type = QED_DEV_TYPE_BB;
1441fe56b9e6SYuval Mintz 	/* Learn number of HW-functions */
1442fc48b7a6SYuval Mintz 	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1443fe56b9e6SYuval Mintz 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
1444fe56b9e6SYuval Mintz 
1445fc48b7a6SYuval Mintz 	if (tmp & (1 << p_hwfn->rel_pf_id)) {
1446fe56b9e6SYuval Mintz 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1447fe56b9e6SYuval Mintz 		cdev->num_hwfns = 2;
1448fe56b9e6SYuval Mintz 	} else {
1449fe56b9e6SYuval Mintz 		cdev->num_hwfns = 1;
1450fe56b9e6SYuval Mintz 	}
1451fe56b9e6SYuval Mintz 
1452fc48b7a6SYuval Mintz 	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1453fe56b9e6SYuval Mintz 				    MISCS_REG_CHIP_TEST_REG) >> 4;
1454fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1455fc48b7a6SYuval Mintz 	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1456fe56b9e6SYuval Mintz 				       MISCS_REG_CHIP_METAL);
1457fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1458fe56b9e6SYuval Mintz 
1459fe56b9e6SYuval Mintz 	DP_INFO(cdev->hwfns,
1460fe56b9e6SYuval Mintz 		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1461fe56b9e6SYuval Mintz 		cdev->chip_num, cdev->chip_rev,
1462fe56b9e6SYuval Mintz 		cdev->chip_bond_id, cdev->chip_metal);
146312e09c69SYuval Mintz 
146412e09c69SYuval Mintz 	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
146512e09c69SYuval Mintz 		DP_NOTICE(cdev->hwfns,
146612e09c69SYuval Mintz 			  "The chip type/rev (BB A0) is not supported!\n");
146712e09c69SYuval Mintz 		return -EINVAL;
146812e09c69SYuval Mintz 	}
146912e09c69SYuval Mintz 
147012e09c69SYuval Mintz 	return 0;
1471fe56b9e6SYuval Mintz }
1472fe56b9e6SYuval Mintz 
1473fe56b9e6SYuval Mintz static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1474fe56b9e6SYuval Mintz 				 void __iomem *p_regview,
1475fe56b9e6SYuval Mintz 				 void __iomem *p_doorbells,
1476fe56b9e6SYuval Mintz 				 enum qed_pci_personality personality)
1477fe56b9e6SYuval Mintz {
1478fe56b9e6SYuval Mintz 	int rc = 0;
1479fe56b9e6SYuval Mintz 
1480fe56b9e6SYuval Mintz 	/* Split PCI bars evenly between hwfns */
1481fe56b9e6SYuval Mintz 	p_hwfn->regview = p_regview;
1482fe56b9e6SYuval Mintz 	p_hwfn->doorbells = p_doorbells;
1483fe56b9e6SYuval Mintz 
14841408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
14851408cc1fSYuval Mintz 		return qed_vf_hw_prepare(p_hwfn);
14861408cc1fSYuval Mintz 
1487fe56b9e6SYuval Mintz 	/* Validate that chip access is feasible */
1488fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1489fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
1490fe56b9e6SYuval Mintz 		       "Reading the ME register returns all Fs; Preventing further chip access\n");
1491fe56b9e6SYuval Mintz 		return -EINVAL;
1492fe56b9e6SYuval Mintz 	}
1493fe56b9e6SYuval Mintz 
1494fe56b9e6SYuval Mintz 	get_function_id(p_hwfn);
1495fe56b9e6SYuval Mintz 
149612e09c69SYuval Mintz 	/* Allocate PTT pool */
149712e09c69SYuval Mintz 	rc = qed_ptt_pool_alloc(p_hwfn);
1498fe56b9e6SYuval Mintz 	if (rc) {
1499fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1500fe56b9e6SYuval Mintz 		goto err0;
1501fe56b9e6SYuval Mintz 	}
1502fe56b9e6SYuval Mintz 
150312e09c69SYuval Mintz 	/* Allocate the main PTT */
150412e09c69SYuval Mintz 	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
150512e09c69SYuval Mintz 
1506fe56b9e6SYuval Mintz 	/* First hwfn learns basic information, e.g., number of hwfns */
150712e09c69SYuval Mintz 	if (!p_hwfn->my_id) {
150812e09c69SYuval Mintz 		rc = qed_get_dev_info(p_hwfn->cdev);
150912e09c69SYuval Mintz 		if (rc != 0)
151012e09c69SYuval Mintz 			goto err1;
151112e09c69SYuval Mintz 	}
151212e09c69SYuval Mintz 
151312e09c69SYuval Mintz 	qed_hw_hwfn_prepare(p_hwfn);
1514fe56b9e6SYuval Mintz 
1515fe56b9e6SYuval Mintz 	/* Initialize MCP structure */
1516fe56b9e6SYuval Mintz 	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1517fe56b9e6SYuval Mintz 	if (rc) {
1518fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1519fe56b9e6SYuval Mintz 		goto err1;
1520fe56b9e6SYuval Mintz 	}
1521fe56b9e6SYuval Mintz 
1522fe56b9e6SYuval Mintz 	/* Read the device configuration information from the HW and SHMEM */
1523fe56b9e6SYuval Mintz 	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1524fe56b9e6SYuval Mintz 	if (rc) {
1525fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1526fe56b9e6SYuval Mintz 		goto err2;
1527fe56b9e6SYuval Mintz 	}
1528fe56b9e6SYuval Mintz 
1529fe56b9e6SYuval Mintz 	/* Allocate the init RT array and initialize the init-ops engine */
1530fe56b9e6SYuval Mintz 	rc = qed_init_alloc(p_hwfn);
1531fe56b9e6SYuval Mintz 	if (rc) {
1532fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1533fe56b9e6SYuval Mintz 		goto err2;
1534fe56b9e6SYuval Mintz 	}
1535fe56b9e6SYuval Mintz 
1536fe56b9e6SYuval Mintz 	return rc;
1537fe56b9e6SYuval Mintz err2:
153832a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn))
153932a47e72SYuval Mintz 		qed_iov_free_hw_info(p_hwfn->cdev);
1540fe56b9e6SYuval Mintz 	qed_mcp_free(p_hwfn);
1541fe56b9e6SYuval Mintz err1:
1542fe56b9e6SYuval Mintz 	qed_hw_hwfn_free(p_hwfn);
1543fe56b9e6SYuval Mintz err0:
1544fe56b9e6SYuval Mintz 	return rc;
1545fe56b9e6SYuval Mintz }
1546fe56b9e6SYuval Mintz 
1547fe56b9e6SYuval Mintz int qed_hw_prepare(struct qed_dev *cdev,
1548fe56b9e6SYuval Mintz 		   int personality)
1549fe56b9e6SYuval Mintz {
1550c78df14eSAriel Elior 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1551c78df14eSAriel Elior 	int rc;
1552fe56b9e6SYuval Mintz 
1553fe56b9e6SYuval Mintz 	/* Store the precompiled init data ptrs */
15541408cc1fSYuval Mintz 	if (IS_PF(cdev))
1555fe56b9e6SYuval Mintz 		qed_init_iro_array(cdev);
1556fe56b9e6SYuval Mintz 
1557fe56b9e6SYuval Mintz 	/* Initialize the first hwfn - will learn number of hwfns */
1558c78df14eSAriel Elior 	rc = qed_hw_prepare_single(p_hwfn,
1559c78df14eSAriel Elior 				   cdev->regview,
1560fe56b9e6SYuval Mintz 				   cdev->doorbells, personality);
1561fe56b9e6SYuval Mintz 	if (rc)
1562fe56b9e6SYuval Mintz 		return rc;
1563fe56b9e6SYuval Mintz 
1564c78df14eSAriel Elior 	personality = p_hwfn->hw_info.personality;
1565fe56b9e6SYuval Mintz 
1566fe56b9e6SYuval Mintz 	/* Initialize the rest of the hwfns */
1567c78df14eSAriel Elior 	if (cdev->num_hwfns > 1) {
1568fe56b9e6SYuval Mintz 		void __iomem *p_regview, *p_doorbell;
1569c78df14eSAriel Elior 		u8 __iomem *addr;
1570fe56b9e6SYuval Mintz 
1571c78df14eSAriel Elior 		/* adjust bar offset for second engine */
1572c2035eeaSRam Amrani 		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
1573c78df14eSAriel Elior 		p_regview = addr;
1574c78df14eSAriel Elior 
1575c78df14eSAriel Elior 		/* adjust doorbell bar offset for second engine */
1576c2035eeaSRam Amrani 		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
1577c78df14eSAriel Elior 		p_doorbell = addr;
1578c78df14eSAriel Elior 
1579c78df14eSAriel Elior 		/* prepare second hw function */
1580c78df14eSAriel Elior 		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1581fe56b9e6SYuval Mintz 					   p_doorbell, personality);
1582c78df14eSAriel Elior 
1583c78df14eSAriel Elior 		/* in case of error, need to free the previously
1584c78df14eSAriel Elior 		 * initialized hwfn 0.
1585c78df14eSAriel Elior 		 */
1586fe56b9e6SYuval Mintz 		if (rc) {
15871408cc1fSYuval Mintz 			if (IS_PF(cdev)) {
1588c78df14eSAriel Elior 				qed_init_free(p_hwfn);
1589c78df14eSAriel Elior 				qed_mcp_free(p_hwfn);
1590c78df14eSAriel Elior 				qed_hw_hwfn_free(p_hwfn);
1591fe56b9e6SYuval Mintz 			}
1592fe56b9e6SYuval Mintz 		}
15931408cc1fSYuval Mintz 	}
1594fe56b9e6SYuval Mintz 
1595c78df14eSAriel Elior 	return rc;
1596fe56b9e6SYuval Mintz }
1597fe56b9e6SYuval Mintz 
1598fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev)
1599fe56b9e6SYuval Mintz {
1600fe56b9e6SYuval Mintz 	int i;
1601fe56b9e6SYuval Mintz 
1602fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1603fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1604fe56b9e6SYuval Mintz 
16051408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
16060b55e27dSYuval Mintz 			qed_vf_pf_release(p_hwfn);
16071408cc1fSYuval Mintz 			continue;
16081408cc1fSYuval Mintz 		}
16091408cc1fSYuval Mintz 
1610fe56b9e6SYuval Mintz 		qed_init_free(p_hwfn);
1611fe56b9e6SYuval Mintz 		qed_hw_hwfn_free(p_hwfn);
1612fe56b9e6SYuval Mintz 		qed_mcp_free(p_hwfn);
1613fe56b9e6SYuval Mintz 	}
161432a47e72SYuval Mintz 
161532a47e72SYuval Mintz 	qed_iov_free_hw_info(cdev);
1616fe56b9e6SYuval Mintz }
1617fe56b9e6SYuval Mintz 
1618fe56b9e6SYuval Mintz int qed_chain_alloc(struct qed_dev *cdev,
1619fe56b9e6SYuval Mintz 		    enum qed_chain_use_mode intended_use,
1620fe56b9e6SYuval Mintz 		    enum qed_chain_mode mode,
1621fe56b9e6SYuval Mintz 		    u16 num_elems,
1622fe56b9e6SYuval Mintz 		    size_t elem_size,
1623fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1624fe56b9e6SYuval Mintz {
1625fe56b9e6SYuval Mintz 	dma_addr_t p_pbl_phys = 0;
1626fe56b9e6SYuval Mintz 	void *p_pbl_virt = NULL;
1627fe56b9e6SYuval Mintz 	dma_addr_t p_phys = 0;
1628fe56b9e6SYuval Mintz 	void *p_virt = NULL;
1629fe56b9e6SYuval Mintz 	u16 page_cnt = 0;
1630fe56b9e6SYuval Mintz 	size_t size;
1631fe56b9e6SYuval Mintz 
1632fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_SINGLE)
1633fe56b9e6SYuval Mintz 		page_cnt = 1;
1634fe56b9e6SYuval Mintz 	else
1635fe56b9e6SYuval Mintz 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1636fe56b9e6SYuval Mintz 
1637fe56b9e6SYuval Mintz 	size = page_cnt * QED_CHAIN_PAGE_SIZE;
1638fe56b9e6SYuval Mintz 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1639fe56b9e6SYuval Mintz 				    size, &p_phys, GFP_KERNEL);
1640fe56b9e6SYuval Mintz 	if (!p_virt) {
1641fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1642fe56b9e6SYuval Mintz 		goto nomem;
1643fe56b9e6SYuval Mintz 	}
1644fe56b9e6SYuval Mintz 
1645fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_PBL) {
1646fe56b9e6SYuval Mintz 		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1647fe56b9e6SYuval Mintz 		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1648fe56b9e6SYuval Mintz 						size, &p_pbl_phys,
1649fe56b9e6SYuval Mintz 						GFP_KERNEL);
1650fe56b9e6SYuval Mintz 		if (!p_pbl_virt) {
1651fe56b9e6SYuval Mintz 			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1652fe56b9e6SYuval Mintz 			goto nomem;
1653fe56b9e6SYuval Mintz 		}
1654fe56b9e6SYuval Mintz 
1655fe56b9e6SYuval Mintz 		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1656fe56b9e6SYuval Mintz 				   (u8)elem_size, intended_use,
1657fe56b9e6SYuval Mintz 				   p_pbl_phys, p_pbl_virt);
1658fe56b9e6SYuval Mintz 	} else {
1659fe56b9e6SYuval Mintz 		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1660fe56b9e6SYuval Mintz 			       (u8)elem_size, intended_use, mode);
1661fe56b9e6SYuval Mintz 	}
1662fe56b9e6SYuval Mintz 
1663fe56b9e6SYuval Mintz 	return 0;
1664fe56b9e6SYuval Mintz 
1665fe56b9e6SYuval Mintz nomem:
	/* Free only what was actually allocated; p_virt and/or p_pbl_virt may
	 * still be NULL depending on which allocation failed.
	 */
	if (p_virt)
1666fe56b9e6SYuval Mintz 		dma_free_coherent(&cdev->pdev->dev,
1667fe56b9e6SYuval Mintz 				  page_cnt * QED_CHAIN_PAGE_SIZE,
1668fe56b9e6SYuval Mintz 				  p_virt, p_phys);
	if (p_pbl_virt)
1669fe56b9e6SYuval Mintz 		dma_free_coherent(&cdev->pdev->dev,
1670fe56b9e6SYuval Mintz 				  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1671fe56b9e6SYuval Mintz 				  p_pbl_virt, p_pbl_phys);
1672fe56b9e6SYuval Mintz 
1673fe56b9e6SYuval Mintz 	return -ENOMEM;
1674fe56b9e6SYuval Mintz }
1675fe56b9e6SYuval Mintz 
1676fe56b9e6SYuval Mintz void qed_chain_free(struct qed_dev *cdev,
1677fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1678fe56b9e6SYuval Mintz {
1679fe56b9e6SYuval Mintz 	size_t size;
1680fe56b9e6SYuval Mintz 
1681fe56b9e6SYuval Mintz 	if (!p_chain->p_virt_addr)
1682fe56b9e6SYuval Mintz 		return;
1683fe56b9e6SYuval Mintz 
1684fe56b9e6SYuval Mintz 	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1685fe56b9e6SYuval Mintz 		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1686fe56b9e6SYuval Mintz 		dma_free_coherent(&cdev->pdev->dev, size,
1687fe56b9e6SYuval Mintz 				  p_chain->pbl.p_virt_table,
1688fe56b9e6SYuval Mintz 				  p_chain->pbl.p_phys_table);
1689fe56b9e6SYuval Mintz 	}
1690fe56b9e6SYuval Mintz 
1691fe56b9e6SYuval Mintz 	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1692fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev, size,
1693fe56b9e6SYuval Mintz 			  p_chain->p_virt_addr,
1694fe56b9e6SYuval Mintz 			  p_chain->p_phys_addr);
1695fe56b9e6SYuval Mintz }
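
/* Typical caller flow for the chain helpers above (a sketch only; the use
 * mode, element count and element type below are illustrative, not taken
 * from a real consumer):
 *
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, 256,
 *			     sizeof(struct some_hw_desc), &chain);
 *	if (rc)
 *		return rc;
 *	...produce/consume elements...
 *	qed_chain_free(cdev, &chain);
 */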
1696cee4d264SManish Chopra 
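/* The qed_fw_*() helpers below translate a PF-relative resource index into
 * its absolute engine-wide index (RESC_START + src_id), rejecting indices
 * outside the PF's allocated range.
 */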
1697cee4d264SManish Chopra int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1698cee4d264SManish Chopra 		    u16 src_id, u16 *dst_id)
1699cee4d264SManish Chopra {
1700cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1701cee4d264SManish Chopra 		u16 min, max;
1702cee4d264SManish Chopra 
1703cee4d264SManish Chopra 		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1704cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1705cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1706cee4d264SManish Chopra 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1707cee4d264SManish Chopra 			  src_id, min, max);
1708cee4d264SManish Chopra 
1709cee4d264SManish Chopra 		return -EINVAL;
1710cee4d264SManish Chopra 	}
1711cee4d264SManish Chopra 
1712cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1713cee4d264SManish Chopra 
1714cee4d264SManish Chopra 	return 0;
1715cee4d264SManish Chopra }
1716cee4d264SManish Chopra 
1717cee4d264SManish Chopra int qed_fw_vport(struct qed_hwfn *p_hwfn,
1718cee4d264SManish Chopra 		 u8 src_id, u8 *dst_id)
1719cee4d264SManish Chopra {
1720cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1721cee4d264SManish Chopra 		u8 min, max;
1722cee4d264SManish Chopra 
1723cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
1724cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
1725cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1726cee4d264SManish Chopra 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
1727cee4d264SManish Chopra 			  src_id, min, max);
1728cee4d264SManish Chopra 
1729cee4d264SManish Chopra 		return -EINVAL;
1730cee4d264SManish Chopra 	}
1731cee4d264SManish Chopra 
1732cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1733cee4d264SManish Chopra 
1734cee4d264SManish Chopra 	return 0;
1735cee4d264SManish Chopra }
1736cee4d264SManish Chopra 
1737cee4d264SManish Chopra int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1738cee4d264SManish Chopra 		   u8 src_id, u8 *dst_id)
1739cee4d264SManish Chopra {
1740cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1741cee4d264SManish Chopra 		u8 min, max;
1742cee4d264SManish Chopra 
1743cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1744cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1745cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1746cee4d264SManish Chopra 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1747cee4d264SManish Chopra 			  src_id, min, max);
1748cee4d264SManish Chopra 
1749cee4d264SManish Chopra 		return -EINVAL;
1750cee4d264SManish Chopra 	}
1751cee4d264SManish Chopra 
1752cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1753cee4d264SManish Chopra 
1754cee4d264SManish Chopra 	return 0;
1755cee4d264SManish Chopra }
1756bcd197c8SManish Chopra 
1757bcd197c8SManish Chopra /* Calculate final WFQ values for all vports and configure them.
1758bcd197c8SManish Chopra  * After this configuration each vport will have
1759bcd197c8SManish Chopra  * approx min rate =  min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
1760bcd197c8SManish Chopra  */
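/* A numerical sketch (assuming QED_WFQ_UNIT is 100, i.e. one-percent
 * granularity, as the validation in qed_init_wfq_param() implies):
 * min_pf_rate = 10000 Mbps and a vport min_speed of 2500 Mbps yield
 * vport_wfq = (2500 * 100) / 10000 = 25.
 */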
1761bcd197c8SManish Chopra static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1762bcd197c8SManish Chopra 					     struct qed_ptt *p_ptt,
1763bcd197c8SManish Chopra 					     u32 min_pf_rate)
1764bcd197c8SManish Chopra {
1765bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
1766bcd197c8SManish Chopra 	int i;
1767bcd197c8SManish Chopra 
1768bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
1769bcd197c8SManish Chopra 
1770bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1771bcd197c8SManish Chopra 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1772bcd197c8SManish Chopra 
1773bcd197c8SManish Chopra 		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
1774bcd197c8SManish Chopra 						min_pf_rate;
1775bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
1776bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
1777bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
1778bcd197c8SManish Chopra 	}
1779bcd197c8SManish Chopra }
1780bcd197c8SManish Chopra 
1781bcd197c8SManish Chopra static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
1782bcd197c8SManish Chopra 				       u32 min_pf_rate)
1783bcd197c8SManish Chopra 
1784bcd197c8SManish Chopra {
1785bcd197c8SManish Chopra 	int i;
1786bcd197c8SManish Chopra 
1787bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
1788bcd197c8SManish Chopra 		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
1789bcd197c8SManish Chopra }
1790bcd197c8SManish Chopra 
1791bcd197c8SManish Chopra static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1792bcd197c8SManish Chopra 					   struct qed_ptt *p_ptt,
1793bcd197c8SManish Chopra 					   u32 min_pf_rate)
1794bcd197c8SManish Chopra {
1795bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
1796bcd197c8SManish Chopra 	int i;
1797bcd197c8SManish Chopra 
1798bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
1799bcd197c8SManish Chopra 
1800bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1801bcd197c8SManish Chopra 		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
1802bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
1803bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
1804bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
1805bcd197c8SManish Chopra 	}
1806bcd197c8SManish Chopra }
1807bcd197c8SManish Chopra 
1808bcd197c8SManish Chopra /* This function performs several validations for WFQ
1809bcd197c8SManish Chopra  * configuration and required min rate for a given vport
1810bcd197c8SManish Chopra  * 1. req_rate must be greater than one percent of min_pf_rate.
1811bcd197c8SManish Chopra  * 2. req_rate must not cause the rates of other vports [those not explicitly
1812bcd197c8SManish Chopra  *    configured for WFQ] to drop below one percent of min_pf_rate.
1813bcd197c8SManish Chopra  * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
1814bcd197c8SManish Chopra  */
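/* Illustrative numbers: with min_pf_rate = 10000 Mbps and num_vports = 4,
 * one vport already configured for 3000 Mbps plus a new request of 4000 Mbps
 * passes all three checks; the remaining 3000 Mbps is then split evenly
 * (1500 Mbps each) between the two vports not explicitly configured.
 */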
1815bcd197c8SManish Chopra static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
1816bcd197c8SManish Chopra 			      u16 vport_id, u32 req_rate,
1817bcd197c8SManish Chopra 			      u32 min_pf_rate)
1818bcd197c8SManish Chopra {
1819bcd197c8SManish Chopra 	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
1820bcd197c8SManish Chopra 	int non_requested_count = 0, req_count = 0, i, num_vports;
1821bcd197c8SManish Chopra 
1822bcd197c8SManish Chopra 	num_vports = p_hwfn->qm_info.num_vports;
1823bcd197c8SManish Chopra 
1824bcd197c8SManish Chopra 	/* Accounting for the vports which are configured for WFQ explicitly */
1825bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
1826bcd197c8SManish Chopra 		u32 tmp_speed;
1827bcd197c8SManish Chopra 
1828bcd197c8SManish Chopra 		if ((i != vport_id) &&
1829bcd197c8SManish Chopra 		    p_hwfn->qm_info.wfq_data[i].configured) {
1830bcd197c8SManish Chopra 			req_count++;
1831bcd197c8SManish Chopra 			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1832bcd197c8SManish Chopra 			total_req_min_rate += tmp_speed;
1833bcd197c8SManish Chopra 		}
1834bcd197c8SManish Chopra 	}
1835bcd197c8SManish Chopra 
1836bcd197c8SManish Chopra 	/* Include current vport data as well */
1837bcd197c8SManish Chopra 	req_count++;
1838bcd197c8SManish Chopra 	total_req_min_rate += req_rate;
1839bcd197c8SManish Chopra 	non_requested_count = num_vports - req_count;
1840bcd197c8SManish Chopra 
1841bcd197c8SManish Chopra 	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
1842bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1843bcd197c8SManish Chopra 			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1844bcd197c8SManish Chopra 			   vport_id, req_rate, min_pf_rate);
1845bcd197c8SManish Chopra 		return -EINVAL;
1846bcd197c8SManish Chopra 	}
1847bcd197c8SManish Chopra 
1848bcd197c8SManish Chopra 	if (num_vports > QED_WFQ_UNIT) {
1849bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1850bcd197c8SManish Chopra 			   "Number of vports is greater than %d\n",
1851bcd197c8SManish Chopra 			   QED_WFQ_UNIT);
1852bcd197c8SManish Chopra 		return -EINVAL;
1853bcd197c8SManish Chopra 	}
1854bcd197c8SManish Chopra 
1855bcd197c8SManish Chopra 	if (total_req_min_rate > min_pf_rate) {
1856bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1857bcd197c8SManish Chopra 			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
1858bcd197c8SManish Chopra 			   total_req_min_rate, min_pf_rate);
1859bcd197c8SManish Chopra 		return -EINVAL;
1860bcd197c8SManish Chopra 	}
1861bcd197c8SManish Chopra 
1862bcd197c8SManish Chopra 	total_left_rate	= min_pf_rate - total_req_min_rate;
1863bcd197c8SManish Chopra 
1864bcd197c8SManish Chopra 	left_rate_per_vp = total_left_rate / non_requested_count;
1865bcd197c8SManish Chopra 	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
1866bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1867bcd197c8SManish Chopra 			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1868bcd197c8SManish Chopra 			   left_rate_per_vp, min_pf_rate);
1869bcd197c8SManish Chopra 		return -EINVAL;
1870bcd197c8SManish Chopra 	}
1871bcd197c8SManish Chopra 
1872bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
1873bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
1874bcd197c8SManish Chopra 
1875bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
1876bcd197c8SManish Chopra 		if (p_hwfn->qm_info.wfq_data[i].configured)
1877bcd197c8SManish Chopra 			continue;
1878bcd197c8SManish Chopra 
1879bcd197c8SManish Chopra 		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
1880bcd197c8SManish Chopra 	}
1881bcd197c8SManish Chopra 
1882bcd197c8SManish Chopra 	return 0;
1883bcd197c8SManish Chopra }
1884bcd197c8SManish Chopra 
1885bcd197c8SManish Chopra static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
1886bcd197c8SManish Chopra 						 struct qed_ptt *p_ptt,
1887bcd197c8SManish Chopra 						 u32 min_pf_rate)
1888bcd197c8SManish Chopra {
1889bcd197c8SManish Chopra 	bool use_wfq = false;
1890bcd197c8SManish Chopra 	int rc = 0;
1891bcd197c8SManish Chopra 	u16 i;
1892bcd197c8SManish Chopra 
1893bcd197c8SManish Chopra 	/* Validate all pre configured vports for wfq */
1894bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1895bcd197c8SManish Chopra 		u32 rate;
1896bcd197c8SManish Chopra 
1897bcd197c8SManish Chopra 		if (!p_hwfn->qm_info.wfq_data[i].configured)
1898bcd197c8SManish Chopra 			continue;
1899bcd197c8SManish Chopra 
1900bcd197c8SManish Chopra 		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
1901bcd197c8SManish Chopra 		use_wfq = true;
1902bcd197c8SManish Chopra 
1903bcd197c8SManish Chopra 		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
1904bcd197c8SManish Chopra 		if (rc) {
1905bcd197c8SManish Chopra 			DP_NOTICE(p_hwfn,
1906bcd197c8SManish Chopra 				  "WFQ validation failed while configuring min rate\n");
1907bcd197c8SManish Chopra 			break;
1908bcd197c8SManish Chopra 		}
1909bcd197c8SManish Chopra 	}
1910bcd197c8SManish Chopra 
1911bcd197c8SManish Chopra 	if (!rc && use_wfq)
1912bcd197c8SManish Chopra 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1913bcd197c8SManish Chopra 	else
1914bcd197c8SManish Chopra 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1915bcd197c8SManish Chopra 
1916bcd197c8SManish Chopra 	return rc;
1917bcd197c8SManish Chopra }
1918bcd197c8SManish Chopra 
1919bcd197c8SManish Chopra /* API to configure WFQ from mcp link change */
1920bcd197c8SManish Chopra void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
1921bcd197c8SManish Chopra {
1922bcd197c8SManish Chopra 	int i;
1923bcd197c8SManish Chopra 
1924bcd197c8SManish Chopra 	for_each_hwfn(cdev, i) {
1925bcd197c8SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1926bcd197c8SManish Chopra 
1927bcd197c8SManish Chopra 		__qed_configure_vp_wfq_on_link_change(p_hwfn,
1928bcd197c8SManish Chopra 						      p_hwfn->p_dpc_ptt,
1929bcd197c8SManish Chopra 						      min_pf_rate);
1930bcd197c8SManish Chopra 	}
1931bcd197c8SManish Chopra }
19324b01e519SManish Chopra 
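/* Derive the PF rate limit from the current link speed; e.g. (sketch)
 * line_speed = 25000 Mbps with max_bw = 40 gives pf_rl = 10000 Mbps, while
 * max_bw = 100 leaves traffic effectively unlimited (see below).
 */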
19334b01e519SManish Chopra int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
19344b01e519SManish Chopra 				     struct qed_ptt *p_ptt,
19354b01e519SManish Chopra 				     struct qed_mcp_link_state *p_link,
19364b01e519SManish Chopra 				     u8 max_bw)
19374b01e519SManish Chopra {
19384b01e519SManish Chopra 	int rc = 0;
19394b01e519SManish Chopra 
19404b01e519SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
19414b01e519SManish Chopra 
19424b01e519SManish Chopra 	if (!p_link->line_speed && (max_bw != 100))
19434b01e519SManish Chopra 		return rc;
19444b01e519SManish Chopra 
19454b01e519SManish Chopra 	p_link->speed = (p_link->line_speed * max_bw) / 100;
19464b01e519SManish Chopra 	p_hwfn->qm_info.pf_rl = p_link->speed;
19474b01e519SManish Chopra 
19484b01e519SManish Chopra 	/* Since the limiter also affects Tx-switched traffic, we don't want it
19494b01e519SManish Chopra 	 * to limit such traffic in case there's no actual limit.
19504b01e519SManish Chopra 	 * In that case, set limit to imaginary high boundary.
19514b01e519SManish Chopra 	 */
19524b01e519SManish Chopra 	if (max_bw == 100)
19534b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
19544b01e519SManish Chopra 
19554b01e519SManish Chopra 	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
19564b01e519SManish Chopra 			    p_hwfn->qm_info.pf_rl);
19574b01e519SManish Chopra 
19584b01e519SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
19594b01e519SManish Chopra 		   "Configured MAX bandwidth to be %d Mb/sec\n",
19604b01e519SManish Chopra 		   p_link->speed);
19614b01e519SManish Chopra 
19624b01e519SManish Chopra 	return rc;
19634b01e519SManish Chopra }
19644b01e519SManish Chopra 
19654b01e519SManish Chopra /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
19664b01e519SManish Chopra int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
19674b01e519SManish Chopra {
19684b01e519SManish Chopra 	int i, rc = -EINVAL;
19694b01e519SManish Chopra 
19704b01e519SManish Chopra 	if (max_bw < 1 || max_bw > 100) {
19714b01e519SManish Chopra 		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
19724b01e519SManish Chopra 		return rc;
19734b01e519SManish Chopra 	}
19744b01e519SManish Chopra 
19754b01e519SManish Chopra 	for_each_hwfn(cdev, i) {
19764b01e519SManish Chopra 		struct qed_hwfn	*p_hwfn = &cdev->hwfns[i];
19774b01e519SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
19784b01e519SManish Chopra 		struct qed_mcp_link_state *p_link;
19794b01e519SManish Chopra 		struct qed_ptt *p_ptt;
19804b01e519SManish Chopra 
19814b01e519SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
19824b01e519SManish Chopra 
19834b01e519SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
19844b01e519SManish Chopra 		if (!p_ptt)
19854b01e519SManish Chopra 			return -EBUSY;
19864b01e519SManish Chopra 
19874b01e519SManish Chopra 		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
19884b01e519SManish Chopra 						      p_link, max_bw);
19894b01e519SManish Chopra 
19904b01e519SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
19914b01e519SManish Chopra 
19924b01e519SManish Chopra 		if (rc)
19934b01e519SManish Chopra 			break;
19944b01e519SManish Chopra 	}
19954b01e519SManish Chopra 
19964b01e519SManish Chopra 	return rc;
19974b01e519SManish Chopra }
1998a64b02d5SManish Chopra 
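/* Translate the min_bw percentage into a PF WFQ weight and a minimum rate in
 * Mbps; e.g. (sketch) line_speed = 40000 Mbps with min_bw = 25 gives
 * min_pf_rate = 10000 Mbps.
 */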
1999a64b02d5SManish Chopra int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
2000a64b02d5SManish Chopra 				     struct qed_ptt *p_ptt,
2001a64b02d5SManish Chopra 				     struct qed_mcp_link_state *p_link,
2002a64b02d5SManish Chopra 				     u8 min_bw)
2003a64b02d5SManish Chopra {
2004a64b02d5SManish Chopra 	int rc = 0;
2005a64b02d5SManish Chopra 
2006a64b02d5SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
2007a64b02d5SManish Chopra 	p_hwfn->qm_info.pf_wfq = min_bw;
2008a64b02d5SManish Chopra 
2009a64b02d5SManish Chopra 	if (!p_link->line_speed)
2010a64b02d5SManish Chopra 		return rc;
2011a64b02d5SManish Chopra 
2012a64b02d5SManish Chopra 	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
2013a64b02d5SManish Chopra 
2014a64b02d5SManish Chopra 	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
2015a64b02d5SManish Chopra 
2016a64b02d5SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
2017a64b02d5SManish Chopra 		   "Configured MIN bandwidth to be %d Mb/sec\n",
2018a64b02d5SManish Chopra 		   p_link->min_pf_rate);
2019a64b02d5SManish Chopra 
2020a64b02d5SManish Chopra 	return rc;
2021a64b02d5SManish Chopra }
2022a64b02d5SManish Chopra 
2023a64b02d5SManish Chopra /* Main API to configure PF min bandwidth where bw range is [1-100] */
2024a64b02d5SManish Chopra int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
2025a64b02d5SManish Chopra {
2026a64b02d5SManish Chopra 	int i, rc = -EINVAL;
2027a64b02d5SManish Chopra 
2028a64b02d5SManish Chopra 	if (min_bw < 1 || min_bw > 100) {
2029a64b02d5SManish Chopra 		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
2030a64b02d5SManish Chopra 		return rc;
2031a64b02d5SManish Chopra 	}
2032a64b02d5SManish Chopra 
2033a64b02d5SManish Chopra 	for_each_hwfn(cdev, i) {
2034a64b02d5SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2035a64b02d5SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
2036a64b02d5SManish Chopra 		struct qed_mcp_link_state *p_link;
2037a64b02d5SManish Chopra 		struct qed_ptt *p_ptt;
2038a64b02d5SManish Chopra 
2039a64b02d5SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
2040a64b02d5SManish Chopra 
2041a64b02d5SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
2042a64b02d5SManish Chopra 		if (!p_ptt)
2043a64b02d5SManish Chopra 			return -EBUSY;
2044a64b02d5SManish Chopra 
2045a64b02d5SManish Chopra 		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
2046a64b02d5SManish Chopra 						      p_link, min_bw);
2047a64b02d5SManish Chopra 		if (rc) {
2048a64b02d5SManish Chopra 			qed_ptt_release(p_hwfn, p_ptt);
2049a64b02d5SManish Chopra 			return rc;
2050a64b02d5SManish Chopra 		}
2051a64b02d5SManish Chopra 
2052a64b02d5SManish Chopra 		if (p_link->min_pf_rate) {
2053a64b02d5SManish Chopra 			u32 min_rate = p_link->min_pf_rate;
2054a64b02d5SManish Chopra 
2055a64b02d5SManish Chopra 			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
2056a64b02d5SManish Chopra 								   p_ptt,
2057a64b02d5SManish Chopra 								   min_rate);
2058a64b02d5SManish Chopra 		}
2059a64b02d5SManish Chopra 
2060a64b02d5SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
2061a64b02d5SManish Chopra 	}
2062a64b02d5SManish Chopra 
2063a64b02d5SManish Chopra 	return rc;
2064a64b02d5SManish Chopra }
2065