1fe56b9e6SYuval Mintz /* QLogic qed NIC Driver
2fe56b9e6SYuval Mintz  * Copyright (c) 2015 QLogic Corporation
3fe56b9e6SYuval Mintz  *
4fe56b9e6SYuval Mintz  * This software is available under the terms of the GNU General Public License
5fe56b9e6SYuval Mintz  * (GPL) Version 2, available from the file COPYING in the main directory of
6fe56b9e6SYuval Mintz  * this source tree.
7fe56b9e6SYuval Mintz  */
8fe56b9e6SYuval Mintz 
9fe56b9e6SYuval Mintz #include <linux/types.h>
10fe56b9e6SYuval Mintz #include <asm/byteorder.h>
11fe56b9e6SYuval Mintz #include <linux/io.h>
12fe56b9e6SYuval Mintz #include <linux/delay.h>
13fe56b9e6SYuval Mintz #include <linux/dma-mapping.h>
14fe56b9e6SYuval Mintz #include <linux/errno.h>
15fe56b9e6SYuval Mintz #include <linux/kernel.h>
16fe56b9e6SYuval Mintz #include <linux/mutex.h>
17fe56b9e6SYuval Mintz #include <linux/pci.h>
18fe56b9e6SYuval Mintz #include <linux/slab.h>
19fe56b9e6SYuval Mintz #include <linux/string.h>
20fe56b9e6SYuval Mintz #include <linux/etherdevice.h>
21fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h>
22fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h>
23fe56b9e6SYuval Mintz #include "qed.h"
24fe56b9e6SYuval Mintz #include "qed_cxt.h"
25fe56b9e6SYuval Mintz #include "qed_dev_api.h"
26fe56b9e6SYuval Mintz #include "qed_hsi.h"
27fe56b9e6SYuval Mintz #include "qed_hw.h"
28fe56b9e6SYuval Mintz #include "qed_init_ops.h"
29fe56b9e6SYuval Mintz #include "qed_int.h"
30fe56b9e6SYuval Mintz #include "qed_mcp.h"
31fe56b9e6SYuval Mintz #include "qed_reg_addr.h"
32fe56b9e6SYuval Mintz #include "qed_sp.h"
3332a47e72SYuval Mintz #include "qed_sriov.h"
340b55e27dSYuval Mintz #include "qed_vf.h"
35fe56b9e6SYuval Mintz 
36fe56b9e6SYuval Mintz /* API common to all protocols */
37c2035eeaSRam Amrani enum BAR_ID {
38c2035eeaSRam Amrani 	BAR_ID_0,       /* used for GRC */
39c2035eeaSRam Amrani 	BAR_ID_1        /* used for doorbells */
40c2035eeaSRam Amrani };
41c2035eeaSRam Amrani 
42c2035eeaSRam Amrani static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
43c2035eeaSRam Amrani 			   enum BAR_ID		bar_id)
44c2035eeaSRam Amrani {
45c2035eeaSRam Amrani 	u32 bar_reg = (bar_id == BAR_ID_0 ?
46c2035eeaSRam Amrani 		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
471408cc1fSYuval Mintz 	u32 val;
48c2035eeaSRam Amrani 
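	/* A non-zero register value 'val' encodes a BAR of (1 << (val + 15))
	 * bytes, e.g. val = 3 means 256kB; VFs skip the read entirely and a
	 * fixed 128kB (1 << 17) is returned.
	 */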
491408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
501408cc1fSYuval Mintz 		return 1 << 17;
511408cc1fSYuval Mintz 
521408cc1fSYuval Mintz 	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
53c2035eeaSRam Amrani 	if (val)
54c2035eeaSRam Amrani 		return 1 << (val + 15);
55c2035eeaSRam Amrani 
56c2035eeaSRam Amrani 	/* Old MFW versions initialized the above register only conditionally */
57c2035eeaSRam Amrani 	if (p_hwfn->cdev->num_hwfns > 1) {
58c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
59c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
60c2035eeaSRam Amrani 		return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
61c2035eeaSRam Amrani 	} else {
62c2035eeaSRam Amrani 		DP_INFO(p_hwfn,
63c2035eeaSRam Amrani 			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
64c2035eeaSRam Amrani 		return 512 * 1024;
65c2035eeaSRam Amrani 	}
66c2035eeaSRam Amrani }
67c2035eeaSRam Amrani 
68fe56b9e6SYuval Mintz void qed_init_dp(struct qed_dev *cdev,
69fe56b9e6SYuval Mintz 		 u32 dp_module, u8 dp_level)
70fe56b9e6SYuval Mintz {
71fe56b9e6SYuval Mintz 	u32 i;
72fe56b9e6SYuval Mintz 
73fe56b9e6SYuval Mintz 	cdev->dp_level = dp_level;
74fe56b9e6SYuval Mintz 	cdev->dp_module = dp_module;
75fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
76fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
77fe56b9e6SYuval Mintz 
78fe56b9e6SYuval Mintz 		p_hwfn->dp_level = dp_level;
79fe56b9e6SYuval Mintz 		p_hwfn->dp_module = dp_module;
80fe56b9e6SYuval Mintz 	}
81fe56b9e6SYuval Mintz }
82fe56b9e6SYuval Mintz 
83fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev)
84fe56b9e6SYuval Mintz {
85fe56b9e6SYuval Mintz 	u8 i;
86fe56b9e6SYuval Mintz 
87fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
88fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
89fe56b9e6SYuval Mintz 
90fe56b9e6SYuval Mintz 		p_hwfn->cdev = cdev;
91fe56b9e6SYuval Mintz 		p_hwfn->my_id = i;
92fe56b9e6SYuval Mintz 		p_hwfn->b_active = false;
93fe56b9e6SYuval Mintz 
94fe56b9e6SYuval Mintz 		mutex_init(&p_hwfn->dmae_info.mutex);
95fe56b9e6SYuval Mintz 	}
96fe56b9e6SYuval Mintz 
97fe56b9e6SYuval Mintz 	/* hwfn 0 is always active */
98fe56b9e6SYuval Mintz 	cdev->hwfns[0].b_active = true;
99fe56b9e6SYuval Mintz 
100fe56b9e6SYuval Mintz 	/* set the default cache alignment to 128 bytes (1 << 7) */
101fe56b9e6SYuval Mintz 	cdev->cache_shift = 7;
102fe56b9e6SYuval Mintz }
103fe56b9e6SYuval Mintz 
104fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
105fe56b9e6SYuval Mintz {
106fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
107fe56b9e6SYuval Mintz 
108fe56b9e6SYuval Mintz 	kfree(qm_info->qm_pq_params);
109fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = NULL;
110fe56b9e6SYuval Mintz 	kfree(qm_info->qm_vport_params);
111fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = NULL;
112fe56b9e6SYuval Mintz 	kfree(qm_info->qm_port_params);
113fe56b9e6SYuval Mintz 	qm_info->qm_port_params = NULL;
114bcd197c8SManish Chopra 	kfree(qm_info->wfq_data);
115bcd197c8SManish Chopra 	qm_info->wfq_data = NULL;
116fe56b9e6SYuval Mintz }
117fe56b9e6SYuval Mintz 
118fe56b9e6SYuval Mintz void qed_resc_free(struct qed_dev *cdev)
119fe56b9e6SYuval Mintz {
120fe56b9e6SYuval Mintz 	int i;
121fe56b9e6SYuval Mintz 
1221408cc1fSYuval Mintz 	if (IS_VF(cdev))
1231408cc1fSYuval Mintz 		return;
1241408cc1fSYuval Mintz 
125fe56b9e6SYuval Mintz 	kfree(cdev->fw_data);
126fe56b9e6SYuval Mintz 	cdev->fw_data = NULL;
127fe56b9e6SYuval Mintz 
128fe56b9e6SYuval Mintz 	kfree(cdev->reset_stats);
129fe56b9e6SYuval Mintz 
130fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
131fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
132fe56b9e6SYuval Mintz 
13325c089d7SYuval Mintz 		kfree(p_hwfn->p_tx_cids);
13425c089d7SYuval Mintz 		p_hwfn->p_tx_cids = NULL;
13525c089d7SYuval Mintz 		kfree(p_hwfn->p_rx_cids);
13625c089d7SYuval Mintz 		p_hwfn->p_rx_cids = NULL;
13725c089d7SYuval Mintz 	}
13825c089d7SYuval Mintz 
13925c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
14025c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
14125c089d7SYuval Mintz 
142fe56b9e6SYuval Mintz 		qed_cxt_mngr_free(p_hwfn);
143fe56b9e6SYuval Mintz 		qed_qm_info_free(p_hwfn);
144fe56b9e6SYuval Mintz 		qed_spq_free(p_hwfn);
145fe56b9e6SYuval Mintz 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
146fe56b9e6SYuval Mintz 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
147fe56b9e6SYuval Mintz 		qed_int_free(p_hwfn);
14832a47e72SYuval Mintz 		qed_iov_free(p_hwfn);
149fe56b9e6SYuval Mintz 		qed_dmae_info_free(p_hwfn);
150fe56b9e6SYuval Mintz 	}
151fe56b9e6SYuval Mintz }
152fe56b9e6SYuval Mintz 
153fe56b9e6SYuval Mintz static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
154fe56b9e6SYuval Mintz {
1551408cc1fSYuval Mintz 	u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
156fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
157fe56b9e6SYuval Mintz 	struct init_qm_port_params *p_qm_port;
158fe56b9e6SYuval Mintz 	u16 num_pqs, multi_cos_tcs = 1;
1591408cc1fSYuval Mintz 	u16 num_vfs = 0;
160fe56b9e6SYuval Mintz 
1611408cc1fSYuval Mintz #ifdef CONFIG_QED_SRIOV
1621408cc1fSYuval Mintz 	if (p_hwfn->cdev->p_iov_info)
1631408cc1fSYuval Mintz 		num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
1641408cc1fSYuval Mintz #endif
165fe56b9e6SYuval Mintz 	memset(qm_info, 0, sizeof(*qm_info));
166fe56b9e6SYuval Mintz 
1671408cc1fSYuval Mintz 	num_pqs = multi_cos_tcs + num_vfs + 1;	/* The '1' is for pure-LB */
168fe56b9e6SYuval Mintz 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
169fe56b9e6SYuval Mintz 
170fe56b9e6SYuval Mintz 	/* Sanity checking that setup requires legal number of resources */
171fe56b9e6SYuval Mintz 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
172fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
173fe56b9e6SYuval Mintz 		       "Need too many Physical queues - 0x%04x when only 0x%04x are available\n",
174fe56b9e6SYuval Mintz 		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
175fe56b9e6SYuval Mintz 		return -EINVAL;
176fe56b9e6SYuval Mintz 	}
177fe56b9e6SYuval Mintz 
178fe56b9e6SYuval Mintz 	/* PQ order: per-TC PQs first, then the pure-LB PQ, then one PQ per VF */
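	/* Resulting layout of the qm_pq_params[] array (sketch):
	 *   [0 .. multi_cos_tcs - 1]  per-TC PQs
	 *   [multi_cos_tcs]           the pure-LB PQ
	 *   [multi_cos_tcs + 1 .. ]   one PQ per VF (SR-IOV only)
	 */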
180fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
18160fffb3bSYuval Mintz 					num_pqs, GFP_KERNEL);
182fe56b9e6SYuval Mintz 	if (!qm_info->qm_pq_params)
183fe56b9e6SYuval Mintz 		goto alloc_err;
184fe56b9e6SYuval Mintz 
185fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
18660fffb3bSYuval Mintz 					   num_vports, GFP_KERNEL);
187fe56b9e6SYuval Mintz 	if (!qm_info->qm_vport_params)
188fe56b9e6SYuval Mintz 		goto alloc_err;
189fe56b9e6SYuval Mintz 
190fe56b9e6SYuval Mintz 	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
19160fffb3bSYuval Mintz 					  MAX_NUM_PORTS, GFP_KERNEL);
192fe56b9e6SYuval Mintz 	if (!qm_info->qm_port_params)
193fe56b9e6SYuval Mintz 		goto alloc_err;
194fe56b9e6SYuval Mintz 
195bcd197c8SManish Chopra 	qm_info->wfq_data = kcalloc(num_vports, sizeof(*qm_info->wfq_data),
196bcd197c8SManish Chopra 				    GFP_KERNEL);
197bcd197c8SManish Chopra 	if (!qm_info->wfq_data)
198bcd197c8SManish Chopra 		goto alloc_err;
199bcd197c8SManish Chopra 
200fe56b9e6SYuval Mintz 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
201fe56b9e6SYuval Mintz 
202fe56b9e6SYuval Mintz 	/* First init per-TC PQs */
2031408cc1fSYuval Mintz 	for (i = 0; i < multi_cos_tcs; i++, curr_queue++) {
2041408cc1fSYuval Mintz 		struct init_qm_pq_params *params =
2051408cc1fSYuval Mintz 		    &qm_info->qm_pq_params[curr_queue];
206fe56b9e6SYuval Mintz 
207fe56b9e6SYuval Mintz 		params->vport_id = vport_id;
208fe56b9e6SYuval Mintz 		params->tc_id = p_hwfn->hw_info.non_offload_tc;
209fe56b9e6SYuval Mintz 		params->wrr_group = 1;
210fe56b9e6SYuval Mintz 	}
211fe56b9e6SYuval Mintz 
212fe56b9e6SYuval Mintz 	/* Then init pure-LB PQ */
2131408cc1fSYuval Mintz 	qm_info->pure_lb_pq = curr_queue;
2141408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].vport_id =
2151408cc1fSYuval Mintz 	    (u8) RESC_START(p_hwfn, QED_VPORT);
2161408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
2171408cc1fSYuval Mintz 	qm_info->qm_pq_params[curr_queue].wrr_group = 1;
2181408cc1fSYuval Mintz 	curr_queue++;
219fe56b9e6SYuval Mintz 
220fe56b9e6SYuval Mintz 	qm_info->offload_pq = 0;
2211408cc1fSYuval Mintz 	/* Then init per-VF PQs */
2221408cc1fSYuval Mintz 	vf_offset = curr_queue;
2231408cc1fSYuval Mintz 	for (i = 0; i < num_vfs; i++) {
2241408cc1fSYuval Mintz 		/* First vport is used by the PF */
2251408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
2261408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].tc_id =
2271408cc1fSYuval Mintz 		    p_hwfn->hw_info.non_offload_tc;
2281408cc1fSYuval Mintz 		qm_info->qm_pq_params[curr_queue].wrr_group = 1;
2291408cc1fSYuval Mintz 		curr_queue++;
2301408cc1fSYuval Mintz 	}
2311408cc1fSYuval Mintz 
2321408cc1fSYuval Mintz 	qm_info->vf_queues_offset = vf_offset;
233fe56b9e6SYuval Mintz 	qm_info->num_pqs = num_pqs;
234fe56b9e6SYuval Mintz 	qm_info->num_vports = num_vports;
235fe56b9e6SYuval Mintz 
236fe56b9e6SYuval Mintz 	/* Initialize qm port parameters */
237fe56b9e6SYuval Mintz 	num_ports = p_hwfn->cdev->num_ports_in_engines;
238fe56b9e6SYuval Mintz 	for (i = 0; i < num_ports; i++) {
239fe56b9e6SYuval Mintz 		p_qm_port = &qm_info->qm_port_params[i];
240fe56b9e6SYuval Mintz 		p_qm_port->active = 1;
241fe56b9e6SYuval Mintz 		p_qm_port->num_active_phys_tcs = 4;
242fe56b9e6SYuval Mintz 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
243fe56b9e6SYuval Mintz 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
244fe56b9e6SYuval Mintz 	}
245fe56b9e6SYuval Mintz 
246fe56b9e6SYuval Mintz 	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
247fe56b9e6SYuval Mintz 
248fe56b9e6SYuval Mintz 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
249fe56b9e6SYuval Mintz 
2501408cc1fSYuval Mintz 	qm_info->num_vf_pqs = num_vfs;
251fe56b9e6SYuval Mintz 	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
252fe56b9e6SYuval Mintz 
253a64b02d5SManish Chopra 	for (i = 0; i < qm_info->num_vports; i++)
254a64b02d5SManish Chopra 		qm_info->qm_vport_params[i].vport_wfq = 1;
255a64b02d5SManish Chopra 
256fe56b9e6SYuval Mintz 	qm_info->pf_wfq = 0;
257fe56b9e6SYuval Mintz 	qm_info->pf_rl = 0;
258fe56b9e6SYuval Mintz 	qm_info->vport_rl_en = 1;
259a64b02d5SManish Chopra 	qm_info->vport_wfq_en = 1;
260fe56b9e6SYuval Mintz 
261fe56b9e6SYuval Mintz 	return 0;
262fe56b9e6SYuval Mintz 
263fe56b9e6SYuval Mintz alloc_err:
264fe56b9e6SYuval Mintz 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
265bcd197c8SManish Chopra 	qed_qm_info_free(p_hwfn);
266fe56b9e6SYuval Mintz 	return -ENOMEM;
267fe56b9e6SYuval Mintz }
268fe56b9e6SYuval Mintz 
269fe56b9e6SYuval Mintz int qed_resc_alloc(struct qed_dev *cdev)
270fe56b9e6SYuval Mintz {
271fe56b9e6SYuval Mintz 	struct qed_consq *p_consq;
272fe56b9e6SYuval Mintz 	struct qed_eq *p_eq;
273fe56b9e6SYuval Mintz 	int i, rc = 0;
274fe56b9e6SYuval Mintz 
2751408cc1fSYuval Mintz 	if (IS_VF(cdev))
2761408cc1fSYuval Mintz 		return rc;
2771408cc1fSYuval Mintz 
278fe56b9e6SYuval Mintz 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
279fe56b9e6SYuval Mintz 	if (!cdev->fw_data)
280fe56b9e6SYuval Mintz 		return -ENOMEM;
281fe56b9e6SYuval Mintz 
28225c089d7SYuval Mintz 	/* Allocate Memory for the Queue->CID mapping */
28325c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
28425c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
28525c089d7SYuval Mintz 		int tx_size = sizeof(struct qed_hw_cid_data) *
28625c089d7SYuval Mintz 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
28725c089d7SYuval Mintz 		int rx_size = sizeof(struct qed_hw_cid_data) *
28825c089d7SYuval Mintz 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
28925c089d7SYuval Mintz 
29025c089d7SYuval Mintz 		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
29125c089d7SYuval Mintz 		if (!p_hwfn->p_tx_cids) {
29225c089d7SYuval Mintz 			DP_NOTICE(p_hwfn,
29325c089d7SYuval Mintz 				  "Failed to allocate memory for Tx Cids\n");
2949b15acbfSDan Carpenter 			rc = -ENOMEM;
29525c089d7SYuval Mintz 			goto alloc_err;
29625c089d7SYuval Mintz 		}
29725c089d7SYuval Mintz 
29825c089d7SYuval Mintz 		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
29925c089d7SYuval Mintz 		if (!p_hwfn->p_rx_cids) {
30025c089d7SYuval Mintz 			DP_NOTICE(p_hwfn,
30125c089d7SYuval Mintz 				  "Failed to allocate memory for Rx Cids\n");
3029b15acbfSDan Carpenter 			rc = -ENOMEM;
30325c089d7SYuval Mintz 			goto alloc_err;
30425c089d7SYuval Mintz 		}
30525c089d7SYuval Mintz 	}
30625c089d7SYuval Mintz 
307fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
308fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
309fe56b9e6SYuval Mintz 
310fe56b9e6SYuval Mintz 		/* First allocate the context manager structure */
311fe56b9e6SYuval Mintz 		rc = qed_cxt_mngr_alloc(p_hwfn);
312fe56b9e6SYuval Mintz 		if (rc)
313fe56b9e6SYuval Mintz 			goto alloc_err;
314fe56b9e6SYuval Mintz 
315fe56b9e6SYuval Mintz 		/* Set the HW cid/tid numbers (in the context manager)
316fe56b9e6SYuval Mintz 		 * Must be done prior to any further computations.
317fe56b9e6SYuval Mintz 		 */
318fe56b9e6SYuval Mintz 		rc = qed_cxt_set_pf_params(p_hwfn);
319fe56b9e6SYuval Mintz 		if (rc)
320fe56b9e6SYuval Mintz 			goto alloc_err;
321fe56b9e6SYuval Mintz 
322fe56b9e6SYuval Mintz 		/* Prepare and process QM requirements */
323fe56b9e6SYuval Mintz 		rc = qed_init_qm_info(p_hwfn);
324fe56b9e6SYuval Mintz 		if (rc)
325fe56b9e6SYuval Mintz 			goto alloc_err;
326fe56b9e6SYuval Mintz 
327fe56b9e6SYuval Mintz 		/* Compute the ILT client partition */
328fe56b9e6SYuval Mintz 		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
329fe56b9e6SYuval Mintz 		if (rc)
330fe56b9e6SYuval Mintz 			goto alloc_err;
331fe56b9e6SYuval Mintz 
332fe56b9e6SYuval Mintz 		/* CID map / ILT shadow table / T2
333fe56b9e6SYuval Mintz 		 * The table sizes are determined by the computations above
334fe56b9e6SYuval Mintz 		 */
335fe56b9e6SYuval Mintz 		rc = qed_cxt_tables_alloc(p_hwfn);
336fe56b9e6SYuval Mintz 		if (rc)
337fe56b9e6SYuval Mintz 			goto alloc_err;
338fe56b9e6SYuval Mintz 
339fe56b9e6SYuval Mintz 		/* SPQ, must follow ILT because initializes SPQ context */
340fe56b9e6SYuval Mintz 		rc = qed_spq_alloc(p_hwfn);
341fe56b9e6SYuval Mintz 		if (rc)
342fe56b9e6SYuval Mintz 			goto alloc_err;
343fe56b9e6SYuval Mintz 
344fe56b9e6SYuval Mintz 		/* SP status block allocation */
345fe56b9e6SYuval Mintz 		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
346fe56b9e6SYuval Mintz 							 RESERVED_PTT_DPC);
347fe56b9e6SYuval Mintz 
348fe56b9e6SYuval Mintz 		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
349fe56b9e6SYuval Mintz 		if (rc)
350fe56b9e6SYuval Mintz 			goto alloc_err;
351fe56b9e6SYuval Mintz 
35232a47e72SYuval Mintz 		rc = qed_iov_alloc(p_hwfn);
35332a47e72SYuval Mintz 		if (rc)
35432a47e72SYuval Mintz 			goto alloc_err;
35532a47e72SYuval Mintz 
356fe56b9e6SYuval Mintz 		/* EQ */
357fe56b9e6SYuval Mintz 		p_eq = qed_eq_alloc(p_hwfn, 256);
3589b15acbfSDan Carpenter 		if (!p_eq) {
3599b15acbfSDan Carpenter 			rc = -ENOMEM;
360fe56b9e6SYuval Mintz 			goto alloc_err;
3619b15acbfSDan Carpenter 		}
362fe56b9e6SYuval Mintz 		p_hwfn->p_eq = p_eq;
363fe56b9e6SYuval Mintz 
364fe56b9e6SYuval Mintz 		p_consq = qed_consq_alloc(p_hwfn);
3659b15acbfSDan Carpenter 		if (!p_consq) {
3669b15acbfSDan Carpenter 			rc = -ENOMEM;
367fe56b9e6SYuval Mintz 			goto alloc_err;
3689b15acbfSDan Carpenter 		}
369fe56b9e6SYuval Mintz 		p_hwfn->p_consq = p_consq;
370fe56b9e6SYuval Mintz 
371fe56b9e6SYuval Mintz 		/* DMA info initialization */
372fe56b9e6SYuval Mintz 		rc = qed_dmae_info_alloc(p_hwfn);
373fe56b9e6SYuval Mintz 		if (rc) {
374fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
375fe56b9e6SYuval Mintz 				  "Failed to allocate memory for dmae_info structure\n");
376fe56b9e6SYuval Mintz 			goto alloc_err;
377fe56b9e6SYuval Mintz 		}
378fe56b9e6SYuval Mintz 	}
379fe56b9e6SYuval Mintz 
380fe56b9e6SYuval Mintz 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
381fe56b9e6SYuval Mintz 	if (!cdev->reset_stats) {
382fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
3839b15acbfSDan Carpenter 		rc = -ENOMEM;
384fe56b9e6SYuval Mintz 		goto alloc_err;
385fe56b9e6SYuval Mintz 	}
386fe56b9e6SYuval Mintz 
387fe56b9e6SYuval Mintz 	return 0;
388fe56b9e6SYuval Mintz 
389fe56b9e6SYuval Mintz alloc_err:
390fe56b9e6SYuval Mintz 	qed_resc_free(cdev);
391fe56b9e6SYuval Mintz 	return rc;
392fe56b9e6SYuval Mintz }
393fe56b9e6SYuval Mintz 
394fe56b9e6SYuval Mintz void qed_resc_setup(struct qed_dev *cdev)
395fe56b9e6SYuval Mintz {
396fe56b9e6SYuval Mintz 	int i;
397fe56b9e6SYuval Mintz 
3981408cc1fSYuval Mintz 	if (IS_VF(cdev))
3991408cc1fSYuval Mintz 		return;
4001408cc1fSYuval Mintz 
401fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
402fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
403fe56b9e6SYuval Mintz 
404fe56b9e6SYuval Mintz 		qed_cxt_mngr_setup(p_hwfn);
405fe56b9e6SYuval Mintz 		qed_spq_setup(p_hwfn);
406fe56b9e6SYuval Mintz 		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
407fe56b9e6SYuval Mintz 		qed_consq_setup(p_hwfn, p_hwfn->p_consq);
408fe56b9e6SYuval Mintz 
409fe56b9e6SYuval Mintz 		/* Read shadow of current MFW mailbox */
410fe56b9e6SYuval Mintz 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
411fe56b9e6SYuval Mintz 		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
412fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_cur,
413fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_length);
414fe56b9e6SYuval Mintz 
415fe56b9e6SYuval Mintz 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
41632a47e72SYuval Mintz 
41732a47e72SYuval Mintz 		qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
418fe56b9e6SYuval Mintz 	}
419fe56b9e6SYuval Mintz }
420fe56b9e6SYuval Mintz 
421fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_CNT          (100)
422fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_TIME         (10)
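/* Worst-case wait below for the final-cleanup ACK is therefore
 * FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME ms, i.e. ~1 second.
 */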
423fe56b9e6SYuval Mintz int qed_final_cleanup(struct qed_hwfn *p_hwfn,
4240b55e27dSYuval Mintz 		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
425fe56b9e6SYuval Mintz {
426fe56b9e6SYuval Mintz 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
427fe56b9e6SYuval Mintz 	int rc = -EBUSY;
428fe56b9e6SYuval Mintz 
429fc48b7a6SYuval Mintz 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
430fc48b7a6SYuval Mintz 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
431fe56b9e6SYuval Mintz 
4320b55e27dSYuval Mintz 	if (is_vf)
4330b55e27dSYuval Mintz 		id += 0x10;
4340b55e27dSYuval Mintz 
435fc48b7a6SYuval Mintz 	command |= X_FINAL_CLEANUP_AGG_INT <<
436fc48b7a6SYuval Mintz 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
437fc48b7a6SYuval Mintz 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
438fc48b7a6SYuval Mintz 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
439fc48b7a6SYuval Mintz 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
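	/* Rough sketch of the assembled SDM_OP_GEN command: an aggregative
	 * interrupt completion whose vector bit encodes the (PF/VF) id; the
	 * ack itself is observed by polling the USDM RAM word at 'addr'.
	 */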
440fe56b9e6SYuval Mintz 
441fe56b9e6SYuval Mintz 	/* Make sure notification is not set before initiating final cleanup */
442fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr)) {
443fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
444fe56b9e6SYuval Mintz 			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
446fe56b9e6SYuval Mintz 		REG_WR(p_hwfn, addr, 0);
447fe56b9e6SYuval Mintz 	}
448fe56b9e6SYuval Mintz 
449fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
450fe56b9e6SYuval Mintz 		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
451fe56b9e6SYuval Mintz 		   id, command);
452fe56b9e6SYuval Mintz 
453fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
454fe56b9e6SYuval Mintz 
455fe56b9e6SYuval Mintz 	/* Poll until completion */
456fe56b9e6SYuval Mintz 	while (!REG_RD(p_hwfn, addr) && count--)
457fe56b9e6SYuval Mintz 		msleep(FINAL_CLEANUP_POLL_TIME);
458fe56b9e6SYuval Mintz 
459fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr))
460fe56b9e6SYuval Mintz 		rc = 0;
461fe56b9e6SYuval Mintz 	else
462fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
463fe56b9e6SYuval Mintz 			  "Failed to receive FW final cleanup notification\n");
464fe56b9e6SYuval Mintz 
465fe56b9e6SYuval Mintz 	/* Cleanup afterwards */
466fe56b9e6SYuval Mintz 	REG_WR(p_hwfn, addr, 0);
467fe56b9e6SYuval Mintz 
468fe56b9e6SYuval Mintz 	return rc;
469fe56b9e6SYuval Mintz }
470fe56b9e6SYuval Mintz 
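/* Build the MODE_* bitmask that the init-tool phases (qed_init_run())
 * are conditioned on, based on chip revision, port count and MF mode.
 */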
471fe56b9e6SYuval Mintz static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
472fe56b9e6SYuval Mintz {
473fe56b9e6SYuval Mintz 	int hw_mode = 0;
474fe56b9e6SYuval Mintz 
47512e09c69SYuval Mintz 	hw_mode = (1 << MODE_BB_B0);
476fe56b9e6SYuval Mintz 
477fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->num_ports_in_engines) {
478fe56b9e6SYuval Mintz 	case 1:
479fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
480fe56b9e6SYuval Mintz 		break;
481fe56b9e6SYuval Mintz 	case 2:
482fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
483fe56b9e6SYuval Mintz 		break;
484fe56b9e6SYuval Mintz 	case 4:
485fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
486fe56b9e6SYuval Mintz 		break;
487fe56b9e6SYuval Mintz 	default:
488fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
489fe56b9e6SYuval Mintz 			  p_hwfn->cdev->num_ports_in_engines);
490fe56b9e6SYuval Mintz 		return;
491fe56b9e6SYuval Mintz 	}
492fe56b9e6SYuval Mintz 
493fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->mf_mode) {
494fc48b7a6SYuval Mintz 	case QED_MF_DEFAULT:
495fc48b7a6SYuval Mintz 	case QED_MF_NPAR:
496fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
497fe56b9e6SYuval Mintz 		break;
498fc48b7a6SYuval Mintz 	case QED_MF_OVLAN:
499fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SD;
500fc48b7a6SYuval Mintz 		break;
501fe56b9e6SYuval Mintz 	default:
502fc48b7a6SYuval Mintz 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
503fc48b7a6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
504fe56b9e6SYuval Mintz 	}
505fe56b9e6SYuval Mintz 
506fe56b9e6SYuval Mintz 	hw_mode |= 1 << MODE_ASIC;
507fe56b9e6SYuval Mintz 
508fe56b9e6SYuval Mintz 	p_hwfn->hw_info.hw_mode = hw_mode;
509fe56b9e6SYuval Mintz }
510fe56b9e6SYuval Mintz 
511fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. */
512fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev)
513fe56b9e6SYuval Mintz {
514fe56b9e6SYuval Mintz 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
515fe56b9e6SYuval Mintz 	int i, sb_id;
516fe56b9e6SYuval Mintz 
517fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
518fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
519fe56b9e6SYuval Mintz 		struct qed_igu_info *p_igu_info;
520fe56b9e6SYuval Mintz 		struct qed_igu_block *p_block;
521fe56b9e6SYuval Mintz 		struct cau_sb_entry sb_entry;
522fe56b9e6SYuval Mintz 
523fe56b9e6SYuval Mintz 		p_igu_info = p_hwfn->hw_info.p_igu_info;
524fe56b9e6SYuval Mintz 
525fe56b9e6SYuval Mintz 		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
526fe56b9e6SYuval Mintz 		     sb_id++) {
527fe56b9e6SYuval Mintz 			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
528fe56b9e6SYuval Mintz 			if (!p_block->is_pf)
529fe56b9e6SYuval Mintz 				continue;
530fe56b9e6SYuval Mintz 
531fe56b9e6SYuval Mintz 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
532fe56b9e6SYuval Mintz 					      p_block->function_id,
533fe56b9e6SYuval Mintz 					      0, 0);
534fe56b9e6SYuval Mintz 			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
535fe56b9e6SYuval Mintz 					 sb_entry);
536fe56b9e6SYuval Mintz 		}
537fe56b9e6SYuval Mintz 	}
538fe56b9e6SYuval Mintz }
539fe56b9e6SYuval Mintz 
540fe56b9e6SYuval Mintz static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
541fe56b9e6SYuval Mintz 			      struct qed_ptt *p_ptt,
542fe56b9e6SYuval Mintz 			      int hw_mode)
543fe56b9e6SYuval Mintz {
544fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
545fe56b9e6SYuval Mintz 	struct qed_qm_common_rt_init_params params;
546fe56b9e6SYuval Mintz 	struct qed_dev *cdev = p_hwfn->cdev;
5471408cc1fSYuval Mintz 	u32 concrete_fid;
548fe56b9e6SYuval Mintz 	int rc = 0;
5491408cc1fSYuval Mintz 	u8 vf_id;
550fe56b9e6SYuval Mintz 
551fe56b9e6SYuval Mintz 	qed_init_cau_rt_data(cdev);
552fe56b9e6SYuval Mintz 
553fe56b9e6SYuval Mintz 	/* Program GTT windows */
554fe56b9e6SYuval Mintz 	qed_gtt_init(p_hwfn);
555fe56b9e6SYuval Mintz 
556fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
557fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.bandwidth_max)
558fe56b9e6SYuval Mintz 			qm_info->pf_rl_en = 1;
559fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.bandwidth_min)
560fe56b9e6SYuval Mintz 			qm_info->pf_wfq_en = 1;
561fe56b9e6SYuval Mintz 	}
562fe56b9e6SYuval Mintz 
563fe56b9e6SYuval Mintz 	memset(&params, 0, sizeof(params));
564fe56b9e6SYuval Mintz 	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
565fe56b9e6SYuval Mintz 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
566fe56b9e6SYuval Mintz 	params.pf_rl_en = qm_info->pf_rl_en;
567fe56b9e6SYuval Mintz 	params.pf_wfq_en = qm_info->pf_wfq_en;
568fe56b9e6SYuval Mintz 	params.vport_rl_en = qm_info->vport_rl_en;
569fe56b9e6SYuval Mintz 	params.vport_wfq_en = qm_info->vport_wfq_en;
570fe56b9e6SYuval Mintz 	params.port_params = qm_info->qm_port_params;
571fe56b9e6SYuval Mintz 
572fe56b9e6SYuval Mintz 	qed_qm_common_rt_init(p_hwfn, &params);
573fe56b9e6SYuval Mintz 
574fe56b9e6SYuval Mintz 	qed_cxt_hw_init_common(p_hwfn);
575fe56b9e6SYuval Mintz 
576fe56b9e6SYuval Mintz 	/* Close gate from NIG to BRB/Storm; By default they are open, but
577fe56b9e6SYuval Mintz 	 * we close them to prevent NIG from passing data to reset blocks.
578fe56b9e6SYuval Mintz 	 * Should have been done in the ENGINE phase, but init-tool lacks
579fe56b9e6SYuval Mintz 	 * proper port-pretend capabilities.
580fe56b9e6SYuval Mintz 	 */
581fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
582fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
583fe56b9e6SYuval Mintz 	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
584fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
585fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
586fe56b9e6SYuval Mintz 	qed_port_unpretend(p_hwfn, p_ptt);
587fe56b9e6SYuval Mintz 
588fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
589fe56b9e6SYuval Mintz 	if (rc != 0)
590fe56b9e6SYuval Mintz 		return rc;
591fe56b9e6SYuval Mintz 
592fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
593fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
594fe56b9e6SYuval Mintz 
595fe56b9e6SYuval Mintz 	/* Disable relaxed ordering in the PCI config space */
596fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, 0x20b4,
597fe56b9e6SYuval Mintz 	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
598fe56b9e6SYuval Mintz 
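	/* Pretend to each possible VF in turn so that the CCFC strong-enable
	 * write below lands in that VF's context, then revert to the PF.
	 */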
5991408cc1fSYuval Mintz 	for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
6001408cc1fSYuval Mintz 		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
6011408cc1fSYuval Mintz 		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
6021408cc1fSYuval Mintz 		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
6031408cc1fSYuval Mintz 	}
6041408cc1fSYuval Mintz 	/* pretend to original PF */
6051408cc1fSYuval Mintz 	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
6061408cc1fSYuval Mintz 
607fe56b9e6SYuval Mintz 	return rc;
608fe56b9e6SYuval Mintz }
609fe56b9e6SYuval Mintz 
610fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
611fe56b9e6SYuval Mintz 			    struct qed_ptt *p_ptt,
612fe56b9e6SYuval Mintz 			    int hw_mode)
613fe56b9e6SYuval Mintz {
614fe56b9e6SYuval Mintz 	int rc = 0;
615fe56b9e6SYuval Mintz 
616fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
617fe56b9e6SYuval Mintz 			  hw_mode);
618fe56b9e6SYuval Mintz 	return rc;
619fe56b9e6SYuval Mintz }
620fe56b9e6SYuval Mintz 
621fe56b9e6SYuval Mintz static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
622fe56b9e6SYuval Mintz 			  struct qed_ptt *p_ptt,
623464f6645SManish Chopra 			  struct qed_tunn_start_params *p_tunn,
624fe56b9e6SYuval Mintz 			  int hw_mode,
625fe56b9e6SYuval Mintz 			  bool b_hw_start,
626fe56b9e6SYuval Mintz 			  enum qed_int_mode int_mode,
627fe56b9e6SYuval Mintz 			  bool allow_npar_tx_switch)
628fe56b9e6SYuval Mintz {
629fe56b9e6SYuval Mintz 	u8 rel_pf_id = p_hwfn->rel_pf_id;
630fe56b9e6SYuval Mintz 	int rc = 0;
631fe56b9e6SYuval Mintz 
632fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
633fe56b9e6SYuval Mintz 		struct qed_mcp_function_info *p_info;
634fe56b9e6SYuval Mintz 
635fe56b9e6SYuval Mintz 		p_info = &p_hwfn->mcp_info->func_info;
636fe56b9e6SYuval Mintz 		if (p_info->bandwidth_min)
637fe56b9e6SYuval Mintz 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
638fe56b9e6SYuval Mintz 
639fe56b9e6SYuval Mintz 		/* Update the rate limit once a link is actually established */
6404b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
641fe56b9e6SYuval Mintz 	}
642fe56b9e6SYuval Mintz 
643fe56b9e6SYuval Mintz 	qed_cxt_hw_init_pf(p_hwfn);
644fe56b9e6SYuval Mintz 
645fe56b9e6SYuval Mintz 	qed_int_igu_init_rt(p_hwfn);
646fe56b9e6SYuval Mintz 
647fe56b9e6SYuval Mintz 	/* Set VLAN in NIG if needed */
648fe56b9e6SYuval Mintz 	if (hw_mode & (1 << MODE_MF_SD)) {
649fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
650fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
651fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
652fe56b9e6SYuval Mintz 			     p_hwfn->hw_info.ovlan);
653fe56b9e6SYuval Mintz 	}
654fe56b9e6SYuval Mintz 
655fe56b9e6SYuval Mintz 	/* Enable classification by MAC if needed */
65687aec47dSDan Carpenter 	if (hw_mode & (1 << MODE_MF_SI)) {
657fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
658fe56b9e6SYuval Mintz 			   "Configuring TAGMAC_CLS_TYPE\n");
659fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn,
660fe56b9e6SYuval Mintz 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
661fe56b9e6SYuval Mintz 	}
662fe56b9e6SYuval Mintz 
663fe56b9e6SYuval Mintz 	/* Protocol configuration */
664fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
665fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
666fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
667fe56b9e6SYuval Mintz 
668fe56b9e6SYuval Mintz 	/* Clean up the chip from a previous driver, if such remains exist */
6690b55e27dSYuval Mintz 	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
670fe56b9e6SYuval Mintz 	if (rc != 0)
671fe56b9e6SYuval Mintz 		return rc;
672fe56b9e6SYuval Mintz 
673fe56b9e6SYuval Mintz 	/* PF Init sequence */
674fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
675fe56b9e6SYuval Mintz 	if (rc)
676fe56b9e6SYuval Mintz 		return rc;
677fe56b9e6SYuval Mintz 
678fe56b9e6SYuval Mintz 	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
679fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
680fe56b9e6SYuval Mintz 	if (rc)
681fe56b9e6SYuval Mintz 		return rc;
682fe56b9e6SYuval Mintz 
683fe56b9e6SYuval Mintz 	/* Pure runtime initializations - directly to the HW  */
684fe56b9e6SYuval Mintz 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
685fe56b9e6SYuval Mintz 
686fe56b9e6SYuval Mintz 	if (b_hw_start) {
687fe56b9e6SYuval Mintz 		/* enable interrupts */
688fe56b9e6SYuval Mintz 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
689fe56b9e6SYuval Mintz 
690fe56b9e6SYuval Mintz 		/* send function start command */
691831bfb0eSYuval Mintz 		rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
692831bfb0eSYuval Mintz 				     allow_npar_tx_switch);
693fe56b9e6SYuval Mintz 		if (rc)
694fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
695fe56b9e6SYuval Mintz 	}
696fe56b9e6SYuval Mintz 	return rc;
697fe56b9e6SYuval Mintz }
698fe56b9e6SYuval Mintz 
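/* Enable/disable this PF as a PXP (PGLUE_B) master and wait for the write
 * to take effect (up to ~1 second: 20000 polls x 50us).
 */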
699fe56b9e6SYuval Mintz static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
700fe56b9e6SYuval Mintz 			       struct qed_ptt *p_ptt,
701fe56b9e6SYuval Mintz 			       u8 enable)
702fe56b9e6SYuval Mintz {
703fe56b9e6SYuval Mintz 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
704fe56b9e6SYuval Mintz 
705fe56b9e6SYuval Mintz 	/* Change PF in PXP */
706fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt,
707fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
708fe56b9e6SYuval Mintz 
709fe56b9e6SYuval Mintz 	/* wait until value is set - try for 1 second every 50us */
710fe56b9e6SYuval Mintz 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
711fe56b9e6SYuval Mintz 		val = qed_rd(p_hwfn, p_ptt,
712fe56b9e6SYuval Mintz 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
713fe56b9e6SYuval Mintz 		if (val == set_val)
714fe56b9e6SYuval Mintz 			break;
715fe56b9e6SYuval Mintz 
716fe56b9e6SYuval Mintz 		usleep_range(50, 60);
717fe56b9e6SYuval Mintz 	}
718fe56b9e6SYuval Mintz 
719fe56b9e6SYuval Mintz 	if (val != set_val) {
720fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
721fe56b9e6SYuval Mintz 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
722fe56b9e6SYuval Mintz 		return -EAGAIN;
723fe56b9e6SYuval Mintz 	}
724fe56b9e6SYuval Mintz 
725fe56b9e6SYuval Mintz 	return 0;
726fe56b9e6SYuval Mintz }
727fe56b9e6SYuval Mintz 
728fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
729fe56b9e6SYuval Mintz 				struct qed_ptt *p_main_ptt)
730fe56b9e6SYuval Mintz {
731fe56b9e6SYuval Mintz 	/* Read shadow of current MFW mailbox */
732fe56b9e6SYuval Mintz 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
733fe56b9e6SYuval Mintz 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
734fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_cur,
735fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_length);
736fe56b9e6SYuval Mintz }
737fe56b9e6SYuval Mintz 
738fe56b9e6SYuval Mintz int qed_hw_init(struct qed_dev *cdev,
739464f6645SManish Chopra 		struct qed_tunn_start_params *p_tunn,
740fe56b9e6SYuval Mintz 		bool b_hw_start,
741fe56b9e6SYuval Mintz 		enum qed_int_mode int_mode,
742fe56b9e6SYuval Mintz 		bool allow_npar_tx_switch,
743fe56b9e6SYuval Mintz 		const u8 *bin_fw_data)
744fe56b9e6SYuval Mintz {
74586622ee7SYuval Mintz 	u32 load_code, param;
746fe56b9e6SYuval Mintz 	int rc, mfw_rc, i;
747fe56b9e6SYuval Mintz 
7481408cc1fSYuval Mintz 	if (IS_PF(cdev)) {
749fe56b9e6SYuval Mintz 		rc = qed_init_fw_data(cdev, bin_fw_data);
750fe56b9e6SYuval Mintz 		if (rc != 0)
751fe56b9e6SYuval Mintz 			return rc;
7521408cc1fSYuval Mintz 	}
753fe56b9e6SYuval Mintz 
754fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
755fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
756fe56b9e6SYuval Mintz 
7571408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
7581408cc1fSYuval Mintz 			p_hwfn->b_int_enabled = 1;
7591408cc1fSYuval Mintz 			continue;
7601408cc1fSYuval Mintz 		}
7611408cc1fSYuval Mintz 
762fe56b9e6SYuval Mintz 		/* Enable DMAE in PXP */
763fe56b9e6SYuval Mintz 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
764fe56b9e6SYuval Mintz 
765fe56b9e6SYuval Mintz 		qed_calc_hw_mode(p_hwfn);
766fe56b9e6SYuval Mintz 
767fe56b9e6SYuval Mintz 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
768fe56b9e6SYuval Mintz 				      &load_code);
769fe56b9e6SYuval Mintz 		if (rc) {
770fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
771fe56b9e6SYuval Mintz 			return rc;
772fe56b9e6SYuval Mintz 		}
773fe56b9e6SYuval Mintz 
774fe56b9e6SYuval Mintz 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
775fe56b9e6SYuval Mintz 
776fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
777fe56b9e6SYuval Mintz 			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
778fe56b9e6SYuval Mintz 			   rc, load_code);
779fe56b9e6SYuval Mintz 
780fe56b9e6SYuval Mintz 		p_hwfn->first_on_engine = (load_code ==
781fe56b9e6SYuval Mintz 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
782fe56b9e6SYuval Mintz 
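		/* The switch below falls through on purpose: an ENGINE load
		 * also runs the PORT and PF init phases, and a PORT load
		 * also runs the PF phase.
		 */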
783fe56b9e6SYuval Mintz 		switch (load_code) {
784fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
785fe56b9e6SYuval Mintz 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
786fe56b9e6SYuval Mintz 						p_hwfn->hw_info.hw_mode);
787fe56b9e6SYuval Mintz 			if (rc)
788fe56b9e6SYuval Mintz 				break;
789fe56b9e6SYuval Mintz 		/* Fall through */
790fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_PORT:
791fe56b9e6SYuval Mintz 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
792fe56b9e6SYuval Mintz 					      p_hwfn->hw_info.hw_mode);
793fe56b9e6SYuval Mintz 			if (rc)
794fe56b9e6SYuval Mintz 				break;
795fe56b9e6SYuval Mintz 
796fe56b9e6SYuval Mintz 		/* Fall through */
797fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
798fe56b9e6SYuval Mintz 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
799464f6645SManish Chopra 					    p_tunn, p_hwfn->hw_info.hw_mode,
800fe56b9e6SYuval Mintz 					    b_hw_start, int_mode,
801fe56b9e6SYuval Mintz 					    allow_npar_tx_switch);
802fe56b9e6SYuval Mintz 			break;
803fe56b9e6SYuval Mintz 		default:
804fe56b9e6SYuval Mintz 			rc = -EINVAL;
805fe56b9e6SYuval Mintz 			break;
806fe56b9e6SYuval Mintz 		}
807fe56b9e6SYuval Mintz 
808fe56b9e6SYuval Mintz 		if (rc)
809fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
810fe56b9e6SYuval Mintz 				  "init phase failed for loadcode 0x%x (rc %d)\n",
811fe56b9e6SYuval Mintz 				   load_code, rc);
812fe56b9e6SYuval Mintz 
813fe56b9e6SYuval Mintz 		/* ACK mfw regardless of success or failure of initialization */
814fe56b9e6SYuval Mintz 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
815fe56b9e6SYuval Mintz 				     DRV_MSG_CODE_LOAD_DONE,
816fe56b9e6SYuval Mintz 				     0, &load_code, &param);
817fe56b9e6SYuval Mintz 		if (rc)
818fe56b9e6SYuval Mintz 			return rc;
819fe56b9e6SYuval Mintz 		if (mfw_rc) {
820fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
821fe56b9e6SYuval Mintz 			return mfw_rc;
822fe56b9e6SYuval Mintz 		}
823fe56b9e6SYuval Mintz 
824fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = true;
825fe56b9e6SYuval Mintz 	}
826fe56b9e6SYuval Mintz 
827fe56b9e6SYuval Mintz 	return 0;
828fe56b9e6SYuval Mintz }
829fe56b9e6SYuval Mintz 
830fe56b9e6SYuval Mintz #define QED_HW_STOP_RETRY_LIMIT (10)
8318c925c44SYuval Mintz static inline void qed_hw_timers_stop(struct qed_dev *cdev,
8328c925c44SYuval Mintz 				      struct qed_hwfn *p_hwfn,
8338c925c44SYuval Mintz 				      struct qed_ptt *p_ptt)
8348c925c44SYuval Mintz {
8358c925c44SYuval Mintz 	int i;
8368c925c44SYuval Mintz 
8378c925c44SYuval Mintz 	/* close timers */
8388c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
8398c925c44SYuval Mintz 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
8408c925c44SYuval Mintz 
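	/* Poll for the linear scans to stop; at 1-2ms per iteration and
	 * QED_HW_STOP_RETRY_LIMIT (10) iterations this waits ~10-20ms at most.
	 */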
8418c925c44SYuval Mintz 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
8428c925c44SYuval Mintz 		if ((!qed_rd(p_hwfn, p_ptt,
8438c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
8448c925c44SYuval Mintz 		    (!qed_rd(p_hwfn, p_ptt,
8458c925c44SYuval Mintz 			     TM_REG_PF_SCAN_ACTIVE_TASK)))
8468c925c44SYuval Mintz 			break;
8478c925c44SYuval Mintz 
8488c925c44SYuval Mintz 		/* Depending on the number of connections/tasks, a sleep of
8498c925c44SYuval Mintz 		 * ~1ms between polls may be required.
8508c925c44SYuval Mintz 		 */
8518c925c44SYuval Mintz 		usleep_range(1000, 2000);
8528c925c44SYuval Mintz 	}
8538c925c44SYuval Mintz 
8548c925c44SYuval Mintz 	if (i < QED_HW_STOP_RETRY_LIMIT)
8558c925c44SYuval Mintz 		return;
8568c925c44SYuval Mintz 
8578c925c44SYuval Mintz 	DP_NOTICE(p_hwfn,
8588c925c44SYuval Mintz 		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
8598c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
8608c925c44SYuval Mintz 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
8618c925c44SYuval Mintz }
8628c925c44SYuval Mintz 
8638c925c44SYuval Mintz void qed_hw_timers_stop_all(struct qed_dev *cdev)
8648c925c44SYuval Mintz {
8658c925c44SYuval Mintz 	int j;
8668c925c44SYuval Mintz 
8678c925c44SYuval Mintz 	for_each_hwfn(cdev, j) {
8688c925c44SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
8698c925c44SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
8708c925c44SYuval Mintz 
8718c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
8728c925c44SYuval Mintz 	}
8738c925c44SYuval Mintz }
8748c925c44SYuval Mintz 
875fe56b9e6SYuval Mintz int qed_hw_stop(struct qed_dev *cdev)
876fe56b9e6SYuval Mintz {
877fe56b9e6SYuval Mintz 	int rc = 0, t_rc;
8788c925c44SYuval Mintz 	int j;
879fe56b9e6SYuval Mintz 
880fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, j) {
881fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
882fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
883fe56b9e6SYuval Mintz 
884fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
885fe56b9e6SYuval Mintz 
8861408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
8870b55e27dSYuval Mintz 			qed_vf_pf_int_cleanup(p_hwfn);
8881408cc1fSYuval Mintz 			continue;
8891408cc1fSYuval Mintz 		}
8901408cc1fSYuval Mintz 
891fe56b9e6SYuval Mintz 		/* mark the hw as uninitialized... */
892fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = false;
893fe56b9e6SYuval Mintz 
894fe56b9e6SYuval Mintz 		rc = qed_sp_pf_stop(p_hwfn);
895fe56b9e6SYuval Mintz 		if (rc)
8968c925c44SYuval Mintz 			DP_NOTICE(p_hwfn,
8978c925c44SYuval Mintz 				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
898fe56b9e6SYuval Mintz 
899fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt,
900fe56b9e6SYuval Mintz 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
901fe56b9e6SYuval Mintz 
902fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
903fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
904fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
905fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
906fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
907fe56b9e6SYuval Mintz 
9088c925c44SYuval Mintz 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
909fe56b9e6SYuval Mintz 
910fe56b9e6SYuval Mintz 		/* Disable Attention Generation */
911fe56b9e6SYuval Mintz 		qed_int_igu_disable_int(p_hwfn, p_ptt);
912fe56b9e6SYuval Mintz 
913fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
914fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
915fe56b9e6SYuval Mintz 
916fe56b9e6SYuval Mintz 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
917fe56b9e6SYuval Mintz 
918fe56b9e6SYuval Mintz 		/* Need to wait 1ms to guarantee SBs are cleared */
919fe56b9e6SYuval Mintz 		usleep_range(1000, 2000);
920fe56b9e6SYuval Mintz 	}
921fe56b9e6SYuval Mintz 
9221408cc1fSYuval Mintz 	if (IS_PF(cdev)) {
923fe56b9e6SYuval Mintz 		/* Disable DMAE in PXP - in CMT, this should only be done for
924fe56b9e6SYuval Mintz 		 * first hw-function, and only after all transactions have
925fe56b9e6SYuval Mintz 		 * stopped for all active hw-functions.
926fe56b9e6SYuval Mintz 		 */
927fe56b9e6SYuval Mintz 		t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
9281408cc1fSYuval Mintz 					   cdev->hwfns[0].p_main_ptt, false);
929fe56b9e6SYuval Mintz 		if (t_rc != 0)
930fe56b9e6SYuval Mintz 			rc = t_rc;
9311408cc1fSYuval Mintz 	}
932fe56b9e6SYuval Mintz 
933fe56b9e6SYuval Mintz 	return rc;
934fe56b9e6SYuval Mintz }
935fe56b9e6SYuval Mintz 
936cee4d264SManish Chopra void qed_hw_stop_fastpath(struct qed_dev *cdev)
937cee4d264SManish Chopra {
9388c925c44SYuval Mintz 	int j;
939cee4d264SManish Chopra 
940cee4d264SManish Chopra 	for_each_hwfn(cdev, j) {
941cee4d264SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
942cee4d264SManish Chopra 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
943cee4d264SManish Chopra 
944dacd88d6SYuval Mintz 		if (IS_VF(cdev)) {
945dacd88d6SYuval Mintz 			qed_vf_pf_int_cleanup(p_hwfn);
946dacd88d6SYuval Mintz 			continue;
947dacd88d6SYuval Mintz 		}
948dacd88d6SYuval Mintz 
949cee4d264SManish Chopra 		DP_VERBOSE(p_hwfn,
950cee4d264SManish Chopra 			   NETIF_MSG_IFDOWN,
951cee4d264SManish Chopra 			   "Shutting down the fastpath\n");
952cee4d264SManish Chopra 
953cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt,
954cee4d264SManish Chopra 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
955cee4d264SManish Chopra 
956cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
957cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
958cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
959cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
960cee4d264SManish Chopra 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
961cee4d264SManish Chopra 
962cee4d264SManish Chopra 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
963cee4d264SManish Chopra 
964cee4d264SManish Chopra 		/* Need to wait 1ms to guarantee SBs are cleared */
965cee4d264SManish Chopra 		usleep_range(1000, 2000);
966cee4d264SManish Chopra 	}
967cee4d264SManish Chopra }
968cee4d264SManish Chopra 
969cee4d264SManish Chopra void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
970cee4d264SManish Chopra {
971dacd88d6SYuval Mintz 	if (IS_VF(p_hwfn->cdev))
972dacd88d6SYuval Mintz 		return;
973dacd88d6SYuval Mintz 
974cee4d264SManish Chopra 	/* Re-open incoming traffic */
975cee4d264SManish Chopra 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
976cee4d264SManish Chopra 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
977cee4d264SManish Chopra }
978cee4d264SManish Chopra 
979fe56b9e6SYuval Mintz static int qed_reg_assert(struct qed_hwfn *hwfn,
980fe56b9e6SYuval Mintz 			  struct qed_ptt *ptt, u32 reg,
981fe56b9e6SYuval Mintz 			  bool expected)
982fe56b9e6SYuval Mintz {
983fe56b9e6SYuval Mintz 	u32 assert_val = qed_rd(hwfn, ptt, reg);
984fe56b9e6SYuval Mintz 
985fe56b9e6SYuval Mintz 	if (assert_val != expected) {
986fe56b9e6SYuval Mintz 		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
987fe56b9e6SYuval Mintz 			  reg, expected);
988fe56b9e6SYuval Mintz 		return -EINVAL;
989fe56b9e6SYuval Mintz 	}
990fe56b9e6SYuval Mintz 
991fe56b9e6SYuval Mintz 	return 0;
992fe56b9e6SYuval Mintz }
993fe56b9e6SYuval Mintz 
994fe56b9e6SYuval Mintz int qed_hw_reset(struct qed_dev *cdev)
995fe56b9e6SYuval Mintz {
996fe56b9e6SYuval Mintz 	int rc = 0;
997fe56b9e6SYuval Mintz 	u32 unload_resp, unload_param;
998fe56b9e6SYuval Mintz 	int i;
999fe56b9e6SYuval Mintz 
1000fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1001fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1002fe56b9e6SYuval Mintz 
10031408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
10040b55e27dSYuval Mintz 			rc = qed_vf_pf_reset(p_hwfn);
10050b55e27dSYuval Mintz 			if (rc)
10060b55e27dSYuval Mintz 				return rc;
10071408cc1fSYuval Mintz 			continue;
10081408cc1fSYuval Mintz 		}
10091408cc1fSYuval Mintz 
1010fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
1011fe56b9e6SYuval Mintz 
1012fe56b9e6SYuval Mintz 		/* Check for incorrect states */
1013fe56b9e6SYuval Mintz 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1014fe56b9e6SYuval Mintz 			       QM_REG_USG_CNT_PF_TX, 0);
1015fe56b9e6SYuval Mintz 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1016fe56b9e6SYuval Mintz 			       QM_REG_USG_CNT_PF_OTHER, 0);
1017fe56b9e6SYuval Mintz 
1018fe56b9e6SYuval Mintz 		/* Disable PF in HW blocks */
1019fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
1020fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
1021fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1022fe56b9e6SYuval Mintz 		       TCFC_REG_STRONG_ENABLE_PF, 0);
1023fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1024fe56b9e6SYuval Mintz 		       CCFC_REG_STRONG_ENABLE_PF, 0);
1025fe56b9e6SYuval Mintz 
1026fe56b9e6SYuval Mintz 		/* Send unload command to MCP */
1027fe56b9e6SYuval Mintz 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1028fe56b9e6SYuval Mintz 				 DRV_MSG_CODE_UNLOAD_REQ,
1029fe56b9e6SYuval Mintz 				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
1030fe56b9e6SYuval Mintz 				 &unload_resp, &unload_param);
1031fe56b9e6SYuval Mintz 		if (rc) {
1032fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
1033fe56b9e6SYuval Mintz 			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
1034fe56b9e6SYuval Mintz 		}
1035fe56b9e6SYuval Mintz 
1036fe56b9e6SYuval Mintz 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1037fe56b9e6SYuval Mintz 				 DRV_MSG_CODE_UNLOAD_DONE,
1038fe56b9e6SYuval Mintz 				 0, &unload_resp, &unload_param);
1039fe56b9e6SYuval Mintz 		if (rc) {
1040fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
1041fe56b9e6SYuval Mintz 			return rc;
1042fe56b9e6SYuval Mintz 		}
1043fe56b9e6SYuval Mintz 	}
1044fe56b9e6SYuval Mintz 
1045fe56b9e6SYuval Mintz 	return rc;
1046fe56b9e6SYuval Mintz }
1047fe56b9e6SYuval Mintz 
1048fe56b9e6SYuval Mintz /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
1049fe56b9e6SYuval Mintz static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
1050fe56b9e6SYuval Mintz {
1051fe56b9e6SYuval Mintz 	qed_ptt_pool_free(p_hwfn);
1052fe56b9e6SYuval Mintz 	kfree(p_hwfn->hw_info.p_igu_info);
1053fe56b9e6SYuval Mintz }
1054fe56b9e6SYuval Mintz 
1055fe56b9e6SYuval Mintz /* Setup bar access */
105612e09c69SYuval Mintz static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
1057fe56b9e6SYuval Mintz {
1058fe56b9e6SYuval Mintz 	/* clear indirect access */
1059fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
1060fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
1061fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
1062fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
1063fe56b9e6SYuval Mintz 
1064fe56b9e6SYuval Mintz 	/* Clean Previous errors if such exist */
1065fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1066fe56b9e6SYuval Mintz 	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
1067fe56b9e6SYuval Mintz 	       1 << p_hwfn->abs_pf_id);
1068fe56b9e6SYuval Mintz 
1069fe56b9e6SYuval Mintz 	/* enable internal target-read */
1070fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
1071fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1072fe56b9e6SYuval Mintz }
1073fe56b9e6SYuval Mintz 
1074fe56b9e6SYuval Mintz static void get_function_id(struct qed_hwfn *p_hwfn)
1075fe56b9e6SYuval Mintz {
1076fe56b9e6SYuval Mintz 	/* ME Register */
1077fe56b9e6SYuval Mintz 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
1078fe56b9e6SYuval Mintz 
1079fe56b9e6SYuval Mintz 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1080fe56b9e6SYuval Mintz 
1081fe56b9e6SYuval Mintz 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1082fe56b9e6SYuval Mintz 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1083fe56b9e6SYuval Mintz 				      PXP_CONCRETE_FID_PFID);
1084fe56b9e6SYuval Mintz 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1085fe56b9e6SYuval Mintz 				    PXP_CONCRETE_FID_PORT);
1086fe56b9e6SYuval Mintz }
1087fe56b9e6SYuval Mintz 
108825c089d7SYuval Mintz static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
108925c089d7SYuval Mintz {
109025c089d7SYuval Mintz 	u32 *feat_num = p_hwfn->hw_info.feat_num;
109125c089d7SYuval Mintz 	int num_features = 1;
109225c089d7SYuval Mintz 
109325c089d7SYuval Mintz 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
109425c089d7SYuval Mintz 						num_features,
109525c089d7SYuval Mintz 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
109625c089d7SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
109725c089d7SYuval Mintz 		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
109825c089d7SYuval Mintz 		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
109925c089d7SYuval Mintz 		   num_features);
110025c089d7SYuval Mintz }
110125c089d7SYuval Mintz 
1102fe56b9e6SYuval Mintz static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1103fe56b9e6SYuval Mintz {
1104fe56b9e6SYuval Mintz 	u32 *resc_start = p_hwfn->hw_info.resc_start;
11051408cc1fSYuval Mintz 	u8 num_funcs = p_hwfn->num_funcs_on_engine;
1106fe56b9e6SYuval Mintz 	u32 *resc_num = p_hwfn->hw_info.resc_num;
11074ac801b7SYuval Mintz 	struct qed_sb_cnt_info sb_cnt_info;
110808feecd7SYuval Mintz 	int i, max_vf_vlan_filters;
1109fe56b9e6SYuval Mintz 
11104ac801b7SYuval Mintz 	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
111108feecd7SYuval Mintz 
111208feecd7SYuval Mintz #ifdef CONFIG_QED_SRIOV
111308feecd7SYuval Mintz 	max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
111408feecd7SYuval Mintz #else
111508feecd7SYuval Mintz 	max_vf_vlan_filters = 0;
111608feecd7SYuval Mintz #endif
111708feecd7SYuval Mintz 
11184ac801b7SYuval Mintz 	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
11194ac801b7SYuval Mintz 
1120fe56b9e6SYuval Mintz 	resc_num[QED_SB] = min_t(u32,
1121fe56b9e6SYuval Mintz 				 (MAX_SB_PER_PATH_BB / num_funcs),
11224ac801b7SYuval Mintz 				 sb_cnt_info.sb_cnt);
112325c089d7SYuval Mintz 	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
1124fe56b9e6SYuval Mintz 	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
112525c089d7SYuval Mintz 	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1126fe56b9e6SYuval Mintz 	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1127fe56b9e6SYuval Mintz 	resc_num[QED_RL] = 8;
112825c089d7SYuval Mintz 	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
112925c089d7SYuval Mintz 	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
113025c089d7SYuval Mintz 			     num_funcs;
1131fe56b9e6SYuval Mintz 	resc_num[QED_ILT] = 950;
1132fe56b9e6SYuval Mintz 
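	/* Resources are split evenly between the PFs on the engine; each PF
	 * takes a contiguous slice starting at resc_num[i] * rel_pf_id.
	 */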
1133fe56b9e6SYuval Mintz 	for (i = 0; i < QED_MAX_RESC; i++)
1134fe56b9e6SYuval Mintz 		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1135fe56b9e6SYuval Mintz 
113625c089d7SYuval Mintz 	qed_hw_set_feat(p_hwfn);
113725c089d7SYuval Mintz 
1138fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1139fe56b9e6SYuval Mintz 		   "The numbers for each resource are:\n"
1140fe56b9e6SYuval Mintz 		   "SB = %d start = %d\n"
114125c089d7SYuval Mintz 		   "L2_QUEUE = %d start = %d\n"
1142fe56b9e6SYuval Mintz 		   "VPORT = %d start = %d\n"
1143fe56b9e6SYuval Mintz 		   "PQ = %d start = %d\n"
1144fe56b9e6SYuval Mintz 		   "RL = %d start = %d\n"
114525c089d7SYuval Mintz 		   "MAC = %d start = %d\n"
114625c089d7SYuval Mintz 		   "VLAN = %d start = %d\n"
1147fe56b9e6SYuval Mintz 		   "ILT = %d start = %d\n",
1148fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_SB],
1149fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_SB],
115025c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
115125c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
1152fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_VPORT],
1153fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_VPORT],
1154fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_PQ],
1155fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_PQ],
1156fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_RL],
1157fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_RL],
115825c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_MAC],
115925c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_MAC],
116025c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_VLAN],
116125c089d7SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_VLAN],
1162fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_num[QED_ILT],
1163fe56b9e6SYuval Mintz 		   p_hwfn->hw_info.resc_start[QED_ILT]);
1164fe56b9e6SYuval Mintz }
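
/* Illustrative sketch, not part of the driver: each global resource above is
 * split evenly between the PFs on the engine, and a PF's window starts at
 * share * rel_pf_id. With hypothetical numbers, 448 vports and 8 PFs give
 * PF 3 the range 168..223. The example_* helper is not real driver code.
 */
static void example_resc_window(u32 total, u8 num_funcs, u8 rel_pf_id,
				u32 *resc_num, u32 *resc_start)
{
	*resc_num = total / num_funcs;		/* per-PF share */
	*resc_start = *resc_num * rel_pf_id;	/* first index owned by the PF */
}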
1165fe56b9e6SYuval Mintz 
1166fe56b9e6SYuval Mintz static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1167fe56b9e6SYuval Mintz 			       struct qed_ptt *p_ptt)
1168fe56b9e6SYuval Mintz {
1169cc875c2eSYuval Mintz 	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1170fc48b7a6SYuval Mintz 	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
1171cc875c2eSYuval Mintz 	struct qed_mcp_link_params *link;
1172fe56b9e6SYuval Mintz 
1173fe56b9e6SYuval Mintz 	/* Read global nvm_cfg address */
1174fe56b9e6SYuval Mintz 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1175fe56b9e6SYuval Mintz 
1176fe56b9e6SYuval Mintz 	/* Verify MCP has initialized it */
1177fe56b9e6SYuval Mintz 	if (!nvm_cfg_addr) {
1178fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1179fe56b9e6SYuval Mintz 		return -EINVAL;
1180fe56b9e6SYuval Mintz 	}
1181fe56b9e6SYuval Mintz 
1182fe56b9e6SYuval Mintz 	/* Read nvm_cfg1 (note that this is just the offset, not the offsize (TBD)) */
1183fe56b9e6SYuval Mintz 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1184fe56b9e6SYuval Mintz 
1185cc875c2eSYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1186cc875c2eSYuval Mintz 	       offsetof(struct nvm_cfg1, glob) +
1187cc875c2eSYuval Mintz 	       offsetof(struct nvm_cfg1_glob, core_cfg);
1188cc875c2eSYuval Mintz 
1189cc875c2eSYuval Mintz 	core_cfg = qed_rd(p_hwfn, p_ptt, addr);
1190cc875c2eSYuval Mintz 
1191cc875c2eSYuval Mintz 	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1192cc875c2eSYuval Mintz 		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1193cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1194cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1195cc875c2eSYuval Mintz 		break;
1196cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1197cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1198cc875c2eSYuval Mintz 		break;
1199cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1200cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1201cc875c2eSYuval Mintz 		break;
1202cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1203cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1204cc875c2eSYuval Mintz 		break;
1205cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1206cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1207cc875c2eSYuval Mintz 		break;
1208cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1209cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1210cc875c2eSYuval Mintz 		break;
1211cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1212cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1213cc875c2eSYuval Mintz 		break;
1214cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1215cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1216cc875c2eSYuval Mintz 		break;
1217cc875c2eSYuval Mintz 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1218cc875c2eSYuval Mintz 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1219cc875c2eSYuval Mintz 		break;
1220cc875c2eSYuval Mintz 	default:
1221cc875c2eSYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
1222cc875c2eSYuval Mintz 			  core_cfg);
1223cc875c2eSYuval Mintz 		break;
1224cc875c2eSYuval Mintz 	}
1225cc875c2eSYuval Mintz 
1226cc875c2eSYuval Mintz 	/* Read default link configuration */
1227cc875c2eSYuval Mintz 	link = &p_hwfn->mcp_info->link_input;
1228cc875c2eSYuval Mintz 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1229cc875c2eSYuval Mintz 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1230cc875c2eSYuval Mintz 	link_temp = qed_rd(p_hwfn, p_ptt,
1231cc875c2eSYuval Mintz 			   port_cfg_addr +
1232cc875c2eSYuval Mintz 			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
1233cc875c2eSYuval Mintz 	link->speed.advertised_speeds =
1234cc875c2eSYuval Mintz 		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1235cc875c2eSYuval Mintz 
1236cc875c2eSYuval Mintz 	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
1237cc875c2eSYuval Mintz 						link->speed.advertised_speeds;
1238cc875c2eSYuval Mintz 
1239cc875c2eSYuval Mintz 	link_temp = qed_rd(p_hwfn, p_ptt,
1240cc875c2eSYuval Mintz 			   port_cfg_addr +
1241cc875c2eSYuval Mintz 			   offsetof(struct nvm_cfg1_port, link_settings));
1242cc875c2eSYuval Mintz 	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1243cc875c2eSYuval Mintz 		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1244cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1245cc875c2eSYuval Mintz 		link->speed.autoneg = true;
1246cc875c2eSYuval Mintz 		break;
1247cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1248cc875c2eSYuval Mintz 		link->speed.forced_speed = 1000;
1249cc875c2eSYuval Mintz 		break;
1250cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1251cc875c2eSYuval Mintz 		link->speed.forced_speed = 10000;
1252cc875c2eSYuval Mintz 		break;
1253cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1254cc875c2eSYuval Mintz 		link->speed.forced_speed = 25000;
1255cc875c2eSYuval Mintz 		break;
1256cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1257cc875c2eSYuval Mintz 		link->speed.forced_speed = 40000;
1258cc875c2eSYuval Mintz 		break;
1259cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1260cc875c2eSYuval Mintz 		link->speed.forced_speed = 50000;
1261cc875c2eSYuval Mintz 		break;
1262cc875c2eSYuval Mintz 	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1263cc875c2eSYuval Mintz 		link->speed.forced_speed = 100000;
1264cc875c2eSYuval Mintz 		break;
1265cc875c2eSYuval Mintz 	default:
1266cc875c2eSYuval Mintz 		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
1267cc875c2eSYuval Mintz 			  link_temp);
1268cc875c2eSYuval Mintz 	}
1269cc875c2eSYuval Mintz 
1270cc875c2eSYuval Mintz 	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1271cc875c2eSYuval Mintz 	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1272cc875c2eSYuval Mintz 	link->pause.autoneg = !!(link_temp &
1273cc875c2eSYuval Mintz 				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1274cc875c2eSYuval Mintz 	link->pause.forced_rx = !!(link_temp &
1275cc875c2eSYuval Mintz 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1276cc875c2eSYuval Mintz 	link->pause.forced_tx = !!(link_temp &
1277cc875c2eSYuval Mintz 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1278cc875c2eSYuval Mintz 	link->loopback_mode = 0;
1279cc875c2eSYuval Mintz 
1280cc875c2eSYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1281cc875c2eSYuval Mintz 		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
1282cc875c2eSYuval Mintz 		   link->speed.forced_speed, link->speed.advertised_speeds,
1283cc875c2eSYuval Mintz 		   link->speed.autoneg, link->pause.autoneg);
1284cc875c2eSYuval Mintz 
1285fe56b9e6SYuval Mintz 	/* Read Multi-function information from shmem */
1286fe56b9e6SYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1287fe56b9e6SYuval Mintz 	       offsetof(struct nvm_cfg1, glob) +
1288fe56b9e6SYuval Mintz 	       offsetof(struct nvm_cfg1_glob, generic_cont0);
1289fe56b9e6SYuval Mintz 
1290fe56b9e6SYuval Mintz 	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
1291fe56b9e6SYuval Mintz 
1292fe56b9e6SYuval Mintz 	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1293fe56b9e6SYuval Mintz 		  NVM_CFG1_GLOB_MF_MODE_OFFSET;
1294fe56b9e6SYuval Mintz 
1295fe56b9e6SYuval Mintz 	switch (mf_mode) {
1296fe56b9e6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
1297fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
1298fe56b9e6SYuval Mintz 		break;
1299fe56b9e6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
1300fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
1301fe56b9e6SYuval Mintz 		break;
1302fc48b7a6SYuval Mintz 	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1303fc48b7a6SYuval Mintz 		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
1304fe56b9e6SYuval Mintz 		break;
1305fe56b9e6SYuval Mintz 	}
1306fe56b9e6SYuval Mintz 	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1307fe56b9e6SYuval Mintz 		p_hwfn->cdev->mf_mode);
1308fe56b9e6SYuval Mintz 
1309fc48b7a6SYuval Mintz 	/* Read device capabilities information from shmem */
1310fc48b7a6SYuval Mintz 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1311fc48b7a6SYuval Mintz 		offsetof(struct nvm_cfg1, glob) +
1312fc48b7a6SYuval Mintz 		offsetof(struct nvm_cfg1_glob, device_capabilities);
1313fc48b7a6SYuval Mintz 
1314fc48b7a6SYuval Mintz 	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
1315fc48b7a6SYuval Mintz 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1316fc48b7a6SYuval Mintz 		__set_bit(QED_DEV_CAP_ETH,
1317fc48b7a6SYuval Mintz 			  &p_hwfn->hw_info.device_capabilities);
1318fc48b7a6SYuval Mintz 
1319fe56b9e6SYuval Mintz 	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1320fe56b9e6SYuval Mintz }
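
/* Illustrative sketch, not part of the driver: every nvm_cfg field read above
 * (port mode, link speed, flow control, MF mode, ...) is decoded the same
 * way - mask the 32-bit shmem word and shift by the field's offset, e.g.
 * example_nvm_field(core_cfg, NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK,
 * NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET). The helper itself is hypothetical.
 */
static u32 example_nvm_field(u32 shmem_word, u32 mask, u32 offset)
{
	return (shmem_word & mask) >> offset;
}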
1321fe56b9e6SYuval Mintz 
13221408cc1fSYuval Mintz static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
13231408cc1fSYuval Mintz {
13241408cc1fSYuval Mintz 	u32 reg_function_hide, tmp, eng_mask;
13251408cc1fSYuval Mintz 	u8 num_funcs;
13261408cc1fSYuval Mintz 
13271408cc1fSYuval Mintz 	num_funcs = MAX_NUM_PFS_BB;
13281408cc1fSYuval Mintz 
13291408cc1fSYuval Mintz 	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
13301408cc1fSYuval Mintz 	 * in the other bits are selected.
13311408cc1fSYuval Mintz 	 * Bits 1-15 are for functions 1-15, respectively, and their value is
13321408cc1fSYuval Mintz 	 * '0' only for enabled functions (function 0 always exists and is
13331408cc1fSYuval Mintz 	 * enabled).
13341408cc1fSYuval Mintz 	 * In case of CMT, only the "even" functions are enabled, and thus the
13351408cc1fSYuval Mintz 	 * number of functions for both hwfns is learnt from the same bits.
13361408cc1fSYuval Mintz 	 */
13371408cc1fSYuval Mintz 	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
13381408cc1fSYuval Mintz 
13391408cc1fSYuval Mintz 	if (reg_function_hide & 0x1) {
13401408cc1fSYuval Mintz 		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
13411408cc1fSYuval Mintz 			num_funcs = 0;
13421408cc1fSYuval Mintz 			eng_mask = 0xaaaa;
13431408cc1fSYuval Mintz 		} else {
13441408cc1fSYuval Mintz 			num_funcs = 1;
13451408cc1fSYuval Mintz 			eng_mask = 0x5554;
13461408cc1fSYuval Mintz 		}
13471408cc1fSYuval Mintz 
13481408cc1fSYuval Mintz 		/* Get the number of the enabled functions on the engine */
13491408cc1fSYuval Mintz 		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
13501408cc1fSYuval Mintz 		while (tmp) {
13511408cc1fSYuval Mintz 			if (tmp & 0x1)
13521408cc1fSYuval Mintz 				num_funcs++;
13531408cc1fSYuval Mintz 			tmp >>= 0x1;
13541408cc1fSYuval Mintz 		}
13551408cc1fSYuval Mintz 	}
13561408cc1fSYuval Mintz 
13571408cc1fSYuval Mintz 	p_hwfn->num_funcs_on_engine = num_funcs;
13581408cc1fSYuval Mintz 
13591408cc1fSYuval Mintz 	DP_VERBOSE(p_hwfn,
13601408cc1fSYuval Mintz 		   NETIF_MSG_PROBE,
13611408cc1fSYuval Mintz 		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
13621408cc1fSYuval Mintz 		   p_hwfn->rel_pf_id,
13631408cc1fSYuval Mintz 		   p_hwfn->abs_pf_id,
13641408cc1fSYuval Mintz 		   p_hwfn->num_funcs_on_engine);
13651408cc1fSYuval Mintz }
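
/* Illustrative sketch, not part of the driver: counting enabled PFs from a
 * FUNCTION_HIDE-style value as described in the comment above - bit 0
 * validates the register, a '0' in the per-function bits means "enabled",
 * and function 0 is always counted. This simplified, hypothetical helper
 * ignores the CMT/path special cases handled in qed_get_num_funcs().
 */
static u8 example_count_enabled_funcs(u32 function_hide, u32 eng_mask)
{
	u32 enabled;
	u8 count = 1;		/* function 0 always exists */

	if (!(function_hide & 0x1))
		return MAX_NUM_PFS_BB;	/* bypass bits not valid - assume max */

	enabled = ~function_hide & eng_mask;	/* '0' bits => enabled */
	while (enabled) {
		count += enabled & 0x1;
		enabled >>= 1;
	}

	return count;
}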
13661408cc1fSYuval Mintz 
1367fe56b9e6SYuval Mintz static int
1368fe56b9e6SYuval Mintz qed_get_hw_info(struct qed_hwfn *p_hwfn,
1369fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt,
1370fe56b9e6SYuval Mintz 		enum qed_pci_personality personality)
1371fe56b9e6SYuval Mintz {
1372fe56b9e6SYuval Mintz 	u32 port_mode;
1373fe56b9e6SYuval Mintz 	int rc;
1374fe56b9e6SYuval Mintz 
137532a47e72SYuval Mintz 	/* Since all information is common, only the first hwfn should do this */
137632a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn)) {
137732a47e72SYuval Mintz 		rc = qed_iov_hw_info(p_hwfn);
137832a47e72SYuval Mintz 		if (rc)
137932a47e72SYuval Mintz 			return rc;
138032a47e72SYuval Mintz 	}
138132a47e72SYuval Mintz 
1382fe56b9e6SYuval Mintz 	/* Read the port mode */
1383fe56b9e6SYuval Mintz 	port_mode = qed_rd(p_hwfn, p_ptt,
1384fe56b9e6SYuval Mintz 			   CNIG_REG_NW_PORT_MODE_BB_B0);
1385fe56b9e6SYuval Mintz 
1386fe56b9e6SYuval Mintz 	if (port_mode < 3) {
1387fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1388fe56b9e6SYuval Mintz 	} else if (port_mode <= 5) {
1389fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 2;
1390fe56b9e6SYuval Mintz 	} else {
1391fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1392fe56b9e6SYuval Mintz 			  port_mode);
1393fe56b9e6SYuval Mintz 
1394fe56b9e6SYuval Mintz 		/* Default to a single port per engine */
1395fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1396fe56b9e6SYuval Mintz 	}
1397fe56b9e6SYuval Mintz 
1398fe56b9e6SYuval Mintz 	qed_hw_get_nvm_info(p_hwfn, p_ptt);
1399fe56b9e6SYuval Mintz 
1400fe56b9e6SYuval Mintz 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1401fe56b9e6SYuval Mintz 	if (rc)
1402fe56b9e6SYuval Mintz 		return rc;
1403fe56b9e6SYuval Mintz 
1404fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn))
1405fe56b9e6SYuval Mintz 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1406fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.mac);
1407fe56b9e6SYuval Mintz 	else
1408fe56b9e6SYuval Mintz 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1409fe56b9e6SYuval Mintz 
1410fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1411fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1412fe56b9e6SYuval Mintz 			p_hwfn->hw_info.ovlan =
1413fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.ovlan;
1414fe56b9e6SYuval Mintz 
1415fe56b9e6SYuval Mintz 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1416fe56b9e6SYuval Mintz 	}
1417fe56b9e6SYuval Mintz 
1418fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1419fe56b9e6SYuval Mintz 		enum qed_pci_personality protocol;
1420fe56b9e6SYuval Mintz 
1421fe56b9e6SYuval Mintz 		protocol = p_hwfn->mcp_info->func_info.protocol;
1422fe56b9e6SYuval Mintz 		p_hwfn->hw_info.personality = protocol;
1423fe56b9e6SYuval Mintz 	}
1424fe56b9e6SYuval Mintz 
14251408cc1fSYuval Mintz 	qed_get_num_funcs(p_hwfn, p_ptt);
14261408cc1fSYuval Mintz 
1427fe56b9e6SYuval Mintz 	qed_hw_get_resc(p_hwfn);
1428fe56b9e6SYuval Mintz 
1429fe56b9e6SYuval Mintz 	return rc;
1430fe56b9e6SYuval Mintz }
1431fe56b9e6SYuval Mintz 
143212e09c69SYuval Mintz static int qed_get_dev_info(struct qed_dev *cdev)
1433fe56b9e6SYuval Mintz {
1434fc48b7a6SYuval Mintz 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1435fe56b9e6SYuval Mintz 	u32 tmp;
1436fe56b9e6SYuval Mintz 
1437fc48b7a6SYuval Mintz 	/* Read Vendor Id / Device Id */
1438fc48b7a6SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1439fc48b7a6SYuval Mintz 			     &cdev->vendor_id);
1440fc48b7a6SYuval Mintz 	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1441fc48b7a6SYuval Mintz 			     &cdev->device_id);
1442fc48b7a6SYuval Mintz 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1443fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_NUM);
1444fc48b7a6SYuval Mintz 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1445fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_REV);
1446fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
1447fe56b9e6SYuval Mintz 
1448fc48b7a6SYuval Mintz 	cdev->type = QED_DEV_TYPE_BB;
1449fe56b9e6SYuval Mintz 	/* Learn number of HW-functions */
1450fc48b7a6SYuval Mintz 	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1451fe56b9e6SYuval Mintz 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
1452fe56b9e6SYuval Mintz 
1453fc48b7a6SYuval Mintz 	if (tmp & (1 << p_hwfn->rel_pf_id)) {
1454fe56b9e6SYuval Mintz 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1455fe56b9e6SYuval Mintz 		cdev->num_hwfns = 2;
1456fe56b9e6SYuval Mintz 	} else {
1457fe56b9e6SYuval Mintz 		cdev->num_hwfns = 1;
1458fe56b9e6SYuval Mintz 	}
1459fe56b9e6SYuval Mintz 
1460fc48b7a6SYuval Mintz 	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1461fe56b9e6SYuval Mintz 				    MISCS_REG_CHIP_TEST_REG) >> 4;
1462fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1463fc48b7a6SYuval Mintz 	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1464fe56b9e6SYuval Mintz 				       MISCS_REG_CHIP_METAL);
1465fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1466fe56b9e6SYuval Mintz 
1467fe56b9e6SYuval Mintz 	DP_INFO(cdev->hwfns,
1468fe56b9e6SYuval Mintz 		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1469fe56b9e6SYuval Mintz 		cdev->chip_num, cdev->chip_rev,
1470fe56b9e6SYuval Mintz 		cdev->chip_bond_id, cdev->chip_metal);
147112e09c69SYuval Mintz 
147212e09c69SYuval Mintz 	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
147312e09c69SYuval Mintz 		DP_NOTICE(cdev->hwfns,
147412e09c69SYuval Mintz 			  "The chip type/rev (BB A0) is not supported!\n");
147512e09c69SYuval Mintz 		return -EINVAL;
147612e09c69SYuval Mintz 	}
147712e09c69SYuval Mintz 
147812e09c69SYuval Mintz 	return 0;
1479fe56b9e6SYuval Mintz }
1480fe56b9e6SYuval Mintz 
1481fe56b9e6SYuval Mintz static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1482fe56b9e6SYuval Mintz 				 void __iomem *p_regview,
1483fe56b9e6SYuval Mintz 				 void __iomem *p_doorbells,
1484fe56b9e6SYuval Mintz 				 enum qed_pci_personality personality)
1485fe56b9e6SYuval Mintz {
1486fe56b9e6SYuval Mintz 	int rc = 0;
1487fe56b9e6SYuval Mintz 
1488fe56b9e6SYuval Mintz 	/* Split PCI bars evenly between hwfns */
1489fe56b9e6SYuval Mintz 	p_hwfn->regview = p_regview;
1490fe56b9e6SYuval Mintz 	p_hwfn->doorbells = p_doorbells;
1491fe56b9e6SYuval Mintz 
14921408cc1fSYuval Mintz 	if (IS_VF(p_hwfn->cdev))
14931408cc1fSYuval Mintz 		return qed_vf_hw_prepare(p_hwfn);
14941408cc1fSYuval Mintz 
1495fe56b9e6SYuval Mintz 	/* Validate that chip access is feasible */
1496fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1497fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
1498fe56b9e6SYuval Mintz 		       "Reading the ME register returns all Fs; Preventing further chip access\n");
1499fe56b9e6SYuval Mintz 		return -EINVAL;
1500fe56b9e6SYuval Mintz 	}
1501fe56b9e6SYuval Mintz 
1502fe56b9e6SYuval Mintz 	get_function_id(p_hwfn);
1503fe56b9e6SYuval Mintz 
150412e09c69SYuval Mintz 	/* Allocate PTT pool */
150512e09c69SYuval Mintz 	rc = qed_ptt_pool_alloc(p_hwfn);
1506fe56b9e6SYuval Mintz 	if (rc) {
1507fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1508fe56b9e6SYuval Mintz 		goto err0;
1509fe56b9e6SYuval Mintz 	}
1510fe56b9e6SYuval Mintz 
151112e09c69SYuval Mintz 	/* Allocate the main PTT */
151212e09c69SYuval Mintz 	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
151312e09c69SYuval Mintz 
1514fe56b9e6SYuval Mintz 	/* First hwfn learns basic information, e.g., number of hwfns */
151512e09c69SYuval Mintz 	if (!p_hwfn->my_id) {
151612e09c69SYuval Mintz 		rc = qed_get_dev_info(p_hwfn->cdev);
151712e09c69SYuval Mintz 		if (rc != 0)
151812e09c69SYuval Mintz 			goto err1;
151912e09c69SYuval Mintz 	}
152012e09c69SYuval Mintz 
152112e09c69SYuval Mintz 	qed_hw_hwfn_prepare(p_hwfn);
1522fe56b9e6SYuval Mintz 
1523fe56b9e6SYuval Mintz 	/* Initialize MCP structure */
1524fe56b9e6SYuval Mintz 	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1525fe56b9e6SYuval Mintz 	if (rc) {
1526fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1527fe56b9e6SYuval Mintz 		goto err1;
1528fe56b9e6SYuval Mintz 	}
1529fe56b9e6SYuval Mintz 
1530fe56b9e6SYuval Mintz 	/* Read the device configuration information from the HW and SHMEM */
1531fe56b9e6SYuval Mintz 	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1532fe56b9e6SYuval Mintz 	if (rc) {
1533fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1534fe56b9e6SYuval Mintz 		goto err2;
1535fe56b9e6SYuval Mintz 	}
1536fe56b9e6SYuval Mintz 
1537fe56b9e6SYuval Mintz 	/* Allocate the init RT array and initialize the init-ops engine */
1538fe56b9e6SYuval Mintz 	rc = qed_init_alloc(p_hwfn);
1539fe56b9e6SYuval Mintz 	if (rc) {
1540fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1541fe56b9e6SYuval Mintz 		goto err2;
1542fe56b9e6SYuval Mintz 	}
1543fe56b9e6SYuval Mintz 
1544fe56b9e6SYuval Mintz 	return rc;
1545fe56b9e6SYuval Mintz err2:
154632a47e72SYuval Mintz 	if (IS_LEAD_HWFN(p_hwfn))
154732a47e72SYuval Mintz 		qed_iov_free_hw_info(p_hwfn->cdev);
1548fe56b9e6SYuval Mintz 	qed_mcp_free(p_hwfn);
1549fe56b9e6SYuval Mintz err1:
1550fe56b9e6SYuval Mintz 	qed_hw_hwfn_free(p_hwfn);
1551fe56b9e6SYuval Mintz err0:
1552fe56b9e6SYuval Mintz 	return rc;
1553fe56b9e6SYuval Mintz }
1554fe56b9e6SYuval Mintz 
1555fe56b9e6SYuval Mintz int qed_hw_prepare(struct qed_dev *cdev,
1556fe56b9e6SYuval Mintz 		   int personality)
1557fe56b9e6SYuval Mintz {
1558c78df14eSAriel Elior 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1559c78df14eSAriel Elior 	int rc;
1560fe56b9e6SYuval Mintz 
1561fe56b9e6SYuval Mintz 	/* Store the precompiled init data ptrs */
15621408cc1fSYuval Mintz 	if (IS_PF(cdev))
1563fe56b9e6SYuval Mintz 		qed_init_iro_array(cdev);
1564fe56b9e6SYuval Mintz 
1565fe56b9e6SYuval Mintz 	/* Initialize the first hwfn - will learn number of hwfns */
1566c78df14eSAriel Elior 	rc = qed_hw_prepare_single(p_hwfn,
1567c78df14eSAriel Elior 				   cdev->regview,
1568fe56b9e6SYuval Mintz 				   cdev->doorbells, personality);
1569fe56b9e6SYuval Mintz 	if (rc)
1570fe56b9e6SYuval Mintz 		return rc;
1571fe56b9e6SYuval Mintz 
1572c78df14eSAriel Elior 	personality = p_hwfn->hw_info.personality;
1573fe56b9e6SYuval Mintz 
1574fe56b9e6SYuval Mintz 	/* Initialize the rest of the hwfns */
1575c78df14eSAriel Elior 	if (cdev->num_hwfns > 1) {
1576fe56b9e6SYuval Mintz 		void __iomem *p_regview, *p_doorbell;
1577c78df14eSAriel Elior 		u8 __iomem *addr;
1578fe56b9e6SYuval Mintz 
1579c78df14eSAriel Elior 		/* adjust bar offset for second engine */
1580c2035eeaSRam Amrani 		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
1581c78df14eSAriel Elior 		p_regview = addr;
1582c78df14eSAriel Elior 
1583c78df14eSAriel Elior 		/* adjust doorbell bar offset for second engine */
1584c2035eeaSRam Amrani 		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
1585c78df14eSAriel Elior 		p_doorbell = addr;
1586c78df14eSAriel Elior 
1587c78df14eSAriel Elior 		/* prepare second hw function */
1588c78df14eSAriel Elior 		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1589fe56b9e6SYuval Mintz 					   p_doorbell, personality);
1590c78df14eSAriel Elior 
1591c78df14eSAriel Elior 		/* in case of error, need to free the previously
1592c78df14eSAriel Elior 		 * initialized hwfn 0.
1593c78df14eSAriel Elior 		 */
1594fe56b9e6SYuval Mintz 		if (rc) {
15951408cc1fSYuval Mintz 			if (IS_PF(cdev)) {
1596c78df14eSAriel Elior 				qed_init_free(p_hwfn);
1597c78df14eSAriel Elior 				qed_mcp_free(p_hwfn);
1598c78df14eSAriel Elior 				qed_hw_hwfn_free(p_hwfn);
1599fe56b9e6SYuval Mintz 			}
1600fe56b9e6SYuval Mintz 		}
16011408cc1fSYuval Mintz 	}
1602fe56b9e6SYuval Mintz 
1603c78df14eSAriel Elior 	return rc;
1604fe56b9e6SYuval Mintz }
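
/* Illustrative sketch, not part of the driver: in CMT mode the two hwfns
 * split each PCI BAR in half, so the second engine's register/doorbell views
 * start at base + bar_size / 2, exactly as computed for hwfn[1] above. The
 * example_* helper is hypothetical.
 */
static void __iomem *example_second_engine_view(void __iomem *bar_base,
						u32 bar_size)
{
	return (u8 __iomem *)bar_base + bar_size / 2;
}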
1605fe56b9e6SYuval Mintz 
1606fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev)
1607fe56b9e6SYuval Mintz {
1608fe56b9e6SYuval Mintz 	int i;
1609fe56b9e6SYuval Mintz 
1610fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1611fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1612fe56b9e6SYuval Mintz 
16131408cc1fSYuval Mintz 		if (IS_VF(cdev)) {
16140b55e27dSYuval Mintz 			qed_vf_pf_release(p_hwfn);
16151408cc1fSYuval Mintz 			continue;
16161408cc1fSYuval Mintz 		}
16171408cc1fSYuval Mintz 
1618fe56b9e6SYuval Mintz 		qed_init_free(p_hwfn);
1619fe56b9e6SYuval Mintz 		qed_hw_hwfn_free(p_hwfn);
1620fe56b9e6SYuval Mintz 		qed_mcp_free(p_hwfn);
1621fe56b9e6SYuval Mintz 	}
162232a47e72SYuval Mintz 
162332a47e72SYuval Mintz 	qed_iov_free_hw_info(cdev);
1624fe56b9e6SYuval Mintz }
1625fe56b9e6SYuval Mintz 
1626fe56b9e6SYuval Mintz int qed_chain_alloc(struct qed_dev *cdev,
1627fe56b9e6SYuval Mintz 		    enum qed_chain_use_mode intended_use,
1628fe56b9e6SYuval Mintz 		    enum qed_chain_mode mode,
1629fe56b9e6SYuval Mintz 		    u16 num_elems,
1630fe56b9e6SYuval Mintz 		    size_t elem_size,
1631fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1632fe56b9e6SYuval Mintz {
1633fe56b9e6SYuval Mintz 	dma_addr_t p_pbl_phys = 0;
1634fe56b9e6SYuval Mintz 	void *p_pbl_virt = NULL;
1635fe56b9e6SYuval Mintz 	dma_addr_t p_phys = 0;
1636fe56b9e6SYuval Mintz 	void *p_virt = NULL;
1637fe56b9e6SYuval Mintz 	u16 page_cnt = 0;
1638fe56b9e6SYuval Mintz 	size_t size;
1639fe56b9e6SYuval Mintz 
1640fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_SINGLE)
1641fe56b9e6SYuval Mintz 		page_cnt = 1;
1642fe56b9e6SYuval Mintz 	else
1643fe56b9e6SYuval Mintz 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1644fe56b9e6SYuval Mintz 
1645fe56b9e6SYuval Mintz 	size = page_cnt * QED_CHAIN_PAGE_SIZE;
1646fe56b9e6SYuval Mintz 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1647fe56b9e6SYuval Mintz 				    size, &p_phys, GFP_KERNEL);
1648fe56b9e6SYuval Mintz 	if (!p_virt) {
1649fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1650fe56b9e6SYuval Mintz 		goto nomem;
1651fe56b9e6SYuval Mintz 	}
1652fe56b9e6SYuval Mintz 
1653fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_PBL) {
1654fe56b9e6SYuval Mintz 		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1655fe56b9e6SYuval Mintz 		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1656fe56b9e6SYuval Mintz 						size, &p_pbl_phys,
1657fe56b9e6SYuval Mintz 						GFP_KERNEL);
1658fe56b9e6SYuval Mintz 		if (!p_pbl_virt) {
1659fe56b9e6SYuval Mintz 			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1660fe56b9e6SYuval Mintz 			goto nomem;
1661fe56b9e6SYuval Mintz 		}
1662fe56b9e6SYuval Mintz 
1663fe56b9e6SYuval Mintz 		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1664fe56b9e6SYuval Mintz 				   (u8)elem_size, intended_use,
1665fe56b9e6SYuval Mintz 				   p_pbl_phys, p_pbl_virt);
1666fe56b9e6SYuval Mintz 	} else {
1667fe56b9e6SYuval Mintz 		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1668fe56b9e6SYuval Mintz 			       (u8)elem_size, intended_use, mode);
1669fe56b9e6SYuval Mintz 	}
1670fe56b9e6SYuval Mintz 
1671fe56b9e6SYuval Mintz 	return 0;
1672fe56b9e6SYuval Mintz 
1673fe56b9e6SYuval Mintz nomem:
1674fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
1675fe56b9e6SYuval Mintz 			  page_cnt * QED_CHAIN_PAGE_SIZE,
1676fe56b9e6SYuval Mintz 			  p_virt, p_phys);
1677fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
1678fe56b9e6SYuval Mintz 			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1679fe56b9e6SYuval Mintz 			  p_pbl_virt, p_pbl_phys);
1680fe56b9e6SYuval Mintz 
1681fe56b9e6SYuval Mintz 	return -ENOMEM;
1682fe56b9e6SYuval Mintz }
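
/* Illustrative sketch, not part of the driver: for multi-page chains the page
 * count grows with the number of elements; assuming each page holds
 * page_size / elem_size usable elements (real chained modes may reserve a
 * slot per page for a next-page pointer, which this hypothetical helper
 * ignores), the computation is a simple round-up division.
 */
static u16 example_chain_page_cnt(u16 num_elems, size_t elem_size,
				  size_t page_size)
{
	size_t elems_per_page = page_size / elem_size;

	return (u16)DIV_ROUND_UP(num_elems, elems_per_page);
}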
1683fe56b9e6SYuval Mintz 
1684fe56b9e6SYuval Mintz void qed_chain_free(struct qed_dev *cdev,
1685fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1686fe56b9e6SYuval Mintz {
1687fe56b9e6SYuval Mintz 	size_t size;
1688fe56b9e6SYuval Mintz 
1689fe56b9e6SYuval Mintz 	if (!p_chain->p_virt_addr)
1690fe56b9e6SYuval Mintz 		return;
1691fe56b9e6SYuval Mintz 
1692fe56b9e6SYuval Mintz 	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1693fe56b9e6SYuval Mintz 		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1694fe56b9e6SYuval Mintz 		dma_free_coherent(&cdev->pdev->dev, size,
1695fe56b9e6SYuval Mintz 				  p_chain->pbl.p_virt_table,
1696fe56b9e6SYuval Mintz 				  p_chain->pbl.p_phys_table);
1697fe56b9e6SYuval Mintz 	}
1698fe56b9e6SYuval Mintz 
1699fe56b9e6SYuval Mintz 	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1700fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev, size,
1701fe56b9e6SYuval Mintz 			  p_chain->p_virt_addr,
1702fe56b9e6SYuval Mintz 			  p_chain->p_phys_addr);
1703fe56b9e6SYuval Mintz }
1704cee4d264SManish Chopra 
1705cee4d264SManish Chopra int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1706cee4d264SManish Chopra 		    u16 src_id, u16 *dst_id)
1707cee4d264SManish Chopra {
1708cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1709cee4d264SManish Chopra 		u16 min, max;
1710cee4d264SManish Chopra 
1711cee4d264SManish Chopra 		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1712cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1713cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1714cee4d264SManish Chopra 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1715cee4d264SManish Chopra 			  src_id, min, max);
1716cee4d264SManish Chopra 
1717cee4d264SManish Chopra 		return -EINVAL;
1718cee4d264SManish Chopra 	}
1719cee4d264SManish Chopra 
1720cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1721cee4d264SManish Chopra 
1722cee4d264SManish Chopra 	return 0;
1723cee4d264SManish Chopra }
1724cee4d264SManish Chopra 
1725cee4d264SManish Chopra int qed_fw_vport(struct qed_hwfn *p_hwfn,
1726cee4d264SManish Chopra 		 u8 src_id, u8 *dst_id)
1727cee4d264SManish Chopra {
1728cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1729cee4d264SManish Chopra 		u8 min, max;
1730cee4d264SManish Chopra 
1731cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
1732cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
1733cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1734cee4d264SManish Chopra 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
1735cee4d264SManish Chopra 			  src_id, min, max);
1736cee4d264SManish Chopra 
1737cee4d264SManish Chopra 		return -EINVAL;
1738cee4d264SManish Chopra 	}
1739cee4d264SManish Chopra 
1740cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1741cee4d264SManish Chopra 
1742cee4d264SManish Chopra 	return 0;
1743cee4d264SManish Chopra }
1744cee4d264SManish Chopra 
1745cee4d264SManish Chopra int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1746cee4d264SManish Chopra 		   u8 src_id, u8 *dst_id)
1747cee4d264SManish Chopra {
1748cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1749cee4d264SManish Chopra 		u8 min, max;
1750cee4d264SManish Chopra 
1751cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1752cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1753cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1754cee4d264SManish Chopra 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1755cee4d264SManish Chopra 			  src_id, min, max);
1756cee4d264SManish Chopra 
1757cee4d264SManish Chopra 		return -EINVAL;
1758cee4d264SManish Chopra 	}
1759cee4d264SManish Chopra 
1760cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1761cee4d264SManish Chopra 
1762cee4d264SManish Chopra 	return 0;
1763cee4d264SManish Chopra }
1764bcd197c8SManish Chopra 
1765bcd197c8SManish Chopra /* Calculate final WFQ values for all vports and configure them.
1766bcd197c8SManish Chopra  * After this configuration each vport will have
1767bcd197c8SManish Chopra  * approx min rate =  min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
1768bcd197c8SManish Chopra  */
1769bcd197c8SManish Chopra static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1770bcd197c8SManish Chopra 					     struct qed_ptt *p_ptt,
1771bcd197c8SManish Chopra 					     u32 min_pf_rate)
1772bcd197c8SManish Chopra {
1773bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
1774bcd197c8SManish Chopra 	int i;
1775bcd197c8SManish Chopra 
1776bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
1777bcd197c8SManish Chopra 
1778bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1779bcd197c8SManish Chopra 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1780bcd197c8SManish Chopra 
1781bcd197c8SManish Chopra 		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
1782bcd197c8SManish Chopra 						min_pf_rate;
1783bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
1784bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
1785bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
1786bcd197c8SManish Chopra 	}
1787bcd197c8SManish Chopra }
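
/* Illustrative sketch, not part of the driver: the per-vport weight
 * programmed above is wfq_speed * QED_WFQ_UNIT / min_pf_rate, so a vport
 * asking for 2500 Mbps out of a 10000 Mbps PF minimum gets roughly a quarter
 * of QED_WFQ_UNIT. The numbers and the example_* helper are hypothetical.
 */
static u32 example_vport_wfq_weight(u32 wfq_speed, u32 min_pf_rate)
{
	return (wfq_speed * QED_WFQ_UNIT) / min_pf_rate;
}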
1788bcd197c8SManish Chopra 
1789bcd197c8SManish Chopra static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
1790bcd197c8SManish Chopra 				       u32 min_pf_rate)
1791bcd197c8SManish Chopra 
1792bcd197c8SManish Chopra {
1793bcd197c8SManish Chopra 	int i;
1794bcd197c8SManish Chopra 
1795bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
1796bcd197c8SManish Chopra 		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
1797bcd197c8SManish Chopra }
1798bcd197c8SManish Chopra 
1799bcd197c8SManish Chopra static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
1800bcd197c8SManish Chopra 					   struct qed_ptt *p_ptt,
1801bcd197c8SManish Chopra 					   u32 min_pf_rate)
1802bcd197c8SManish Chopra {
1803bcd197c8SManish Chopra 	struct init_qm_vport_params *vport_params;
1804bcd197c8SManish Chopra 	int i;
1805bcd197c8SManish Chopra 
1806bcd197c8SManish Chopra 	vport_params = p_hwfn->qm_info.qm_vport_params;
1807bcd197c8SManish Chopra 
1808bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1809bcd197c8SManish Chopra 		qed_init_wfq_default_param(p_hwfn, min_pf_rate);
1810bcd197c8SManish Chopra 		qed_init_vport_wfq(p_hwfn, p_ptt,
1811bcd197c8SManish Chopra 				   vport_params[i].first_tx_pq_id,
1812bcd197c8SManish Chopra 				   vport_params[i].vport_wfq);
1813bcd197c8SManish Chopra 	}
1814bcd197c8SManish Chopra }
1815bcd197c8SManish Chopra 
1816bcd197c8SManish Chopra /* This function performs several validations for WFQ
1817bcd197c8SManish Chopra  * configuration and the required min rate for a given vport:
1818bcd197c8SManish Chopra  * 1. req_rate must be greater than one percent of min_pf_rate.
1819bcd197c8SManish Chopra  * 2. req_rate must not push the rates of other vports [those not explicitly
1820bcd197c8SManish Chopra  *    configured for WFQ] below one percent of min_pf_rate.
1821bcd197c8SManish Chopra  * 3. total_req_min_rate [sum of all vport min rates] must not exceed min_pf_rate.
1822bcd197c8SManish Chopra  */
1823bcd197c8SManish Chopra static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
1824bcd197c8SManish Chopra 			      u16 vport_id, u32 req_rate,
1825bcd197c8SManish Chopra 			      u32 min_pf_rate)
1826bcd197c8SManish Chopra {
1827bcd197c8SManish Chopra 	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
1828bcd197c8SManish Chopra 	int non_requested_count = 0, req_count = 0, i, num_vports;
1829bcd197c8SManish Chopra 
1830bcd197c8SManish Chopra 	num_vports = p_hwfn->qm_info.num_vports;
1831bcd197c8SManish Chopra 
1832bcd197c8SManish Chopra 	/* Accounting for the vports which are configured for WFQ explicitly */
1833bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
1834bcd197c8SManish Chopra 		u32 tmp_speed;
1835bcd197c8SManish Chopra 
1836bcd197c8SManish Chopra 		if ((i != vport_id) &&
1837bcd197c8SManish Chopra 		    p_hwfn->qm_info.wfq_data[i].configured) {
1838bcd197c8SManish Chopra 			req_count++;
1839bcd197c8SManish Chopra 			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
1840bcd197c8SManish Chopra 			total_req_min_rate += tmp_speed;
1841bcd197c8SManish Chopra 		}
1842bcd197c8SManish Chopra 	}
1843bcd197c8SManish Chopra 
1844bcd197c8SManish Chopra 	/* Include current vport data as well */
1845bcd197c8SManish Chopra 	req_count++;
1846bcd197c8SManish Chopra 	total_req_min_rate += req_rate;
1847bcd197c8SManish Chopra 	non_requested_count = num_vports - req_count;
1848bcd197c8SManish Chopra 
1849bcd197c8SManish Chopra 	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
1850bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1851bcd197c8SManish Chopra 			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1852bcd197c8SManish Chopra 			   vport_id, req_rate, min_pf_rate);
1853bcd197c8SManish Chopra 		return -EINVAL;
1854bcd197c8SManish Chopra 	}
1855bcd197c8SManish Chopra 
1856bcd197c8SManish Chopra 	if (num_vports > QED_WFQ_UNIT) {
1857bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1858bcd197c8SManish Chopra 			   "Number of vports is greater than %d\n",
1859bcd197c8SManish Chopra 			   QED_WFQ_UNIT);
1860bcd197c8SManish Chopra 		return -EINVAL;
1861bcd197c8SManish Chopra 	}
1862bcd197c8SManish Chopra 
1863bcd197c8SManish Chopra 	if (total_req_min_rate > min_pf_rate) {
1864bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1865bcd197c8SManish Chopra 			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
1866bcd197c8SManish Chopra 			   total_req_min_rate, min_pf_rate);
1867bcd197c8SManish Chopra 		return -EINVAL;
1868bcd197c8SManish Chopra 	}
1869bcd197c8SManish Chopra 
1870bcd197c8SManish Chopra 	total_left_rate	= min_pf_rate - total_req_min_rate;
1871bcd197c8SManish Chopra 
1872bcd197c8SManish Chopra 	left_rate_per_vp = total_left_rate / non_requested_count;
1873bcd197c8SManish Chopra 	if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
1874bcd197c8SManish Chopra 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1875bcd197c8SManish Chopra 			   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
1876bcd197c8SManish Chopra 			   left_rate_per_vp, min_pf_rate);
1877bcd197c8SManish Chopra 		return -EINVAL;
1878bcd197c8SManish Chopra 	}
1879bcd197c8SManish Chopra 
1880bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
1881bcd197c8SManish Chopra 	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
1882bcd197c8SManish Chopra 
1883bcd197c8SManish Chopra 	for (i = 0; i < num_vports; i++) {
1884bcd197c8SManish Chopra 		if (p_hwfn->qm_info.wfq_data[i].configured)
1885bcd197c8SManish Chopra 			continue;
1886bcd197c8SManish Chopra 
1887bcd197c8SManish Chopra 		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
1888bcd197c8SManish Chopra 	}
1889bcd197c8SManish Chopra 
1890bcd197c8SManish Chopra 	return 0;
1891bcd197c8SManish Chopra }
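
/* Illustrative sketch, not part of the driver: the last check in
 * qed_init_wfq_param() boils down to "whatever bandwidth is left for the
 * vports that did not ask for a rate must still amount to at least one
 * percent of the PF minimum per vport". This hypothetical helper also guards
 * the division against a zero non_requested_count.
 */
static bool example_leftover_rate_ok(u32 min_pf_rate, u32 total_req_min_rate,
				     int non_requested_count)
{
	u32 left_rate_per_vp;

	if (total_req_min_rate > min_pf_rate || !non_requested_count)
		return false;

	left_rate_per_vp = (min_pf_rate - total_req_min_rate) /
			   non_requested_count;

	return left_rate_per_vp >= min_pf_rate / QED_WFQ_UNIT;
}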
1892bcd197c8SManish Chopra 
1893733def6aSYuval Mintz static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
1894733def6aSYuval Mintz 				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
1895733def6aSYuval Mintz {
1896733def6aSYuval Mintz 	struct qed_mcp_link_state *p_link;
1897733def6aSYuval Mintz 	int rc = 0;
1898733def6aSYuval Mintz 
1899733def6aSYuval Mintz 	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
1900733def6aSYuval Mintz 
1901733def6aSYuval Mintz 	if (!p_link->min_pf_rate) {
1902733def6aSYuval Mintz 		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
1903733def6aSYuval Mintz 		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
1904733def6aSYuval Mintz 		return rc;
1905733def6aSYuval Mintz 	}
1906733def6aSYuval Mintz 
1907733def6aSYuval Mintz 	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
1908733def6aSYuval Mintz 
1909733def6aSYuval Mintz 	if (rc == 0)
1910733def6aSYuval Mintz 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
1911733def6aSYuval Mintz 						 p_link->min_pf_rate);
1912733def6aSYuval Mintz 	else
1913733def6aSYuval Mintz 		DP_NOTICE(p_hwfn,
1914733def6aSYuval Mintz 			  "Validation failed while configuring min rate\n");
1915733def6aSYuval Mintz 
1916733def6aSYuval Mintz 	return rc;
1917733def6aSYuval Mintz }
1918733def6aSYuval Mintz 
1919bcd197c8SManish Chopra static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
1920bcd197c8SManish Chopra 						 struct qed_ptt *p_ptt,
1921bcd197c8SManish Chopra 						 u32 min_pf_rate)
1922bcd197c8SManish Chopra {
1923bcd197c8SManish Chopra 	bool use_wfq = false;
1924bcd197c8SManish Chopra 	int rc = 0;
1925bcd197c8SManish Chopra 	u16 i;
1926bcd197c8SManish Chopra 
1927bcd197c8SManish Chopra 	/* Validate all pre configured vports for wfq */
1928bcd197c8SManish Chopra 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
1929bcd197c8SManish Chopra 		u32 rate;
1930bcd197c8SManish Chopra 
1931bcd197c8SManish Chopra 		if (!p_hwfn->qm_info.wfq_data[i].configured)
1932bcd197c8SManish Chopra 			continue;
1933bcd197c8SManish Chopra 
1934bcd197c8SManish Chopra 		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
1935bcd197c8SManish Chopra 		use_wfq = true;
1936bcd197c8SManish Chopra 
1937bcd197c8SManish Chopra 		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
1938bcd197c8SManish Chopra 		if (rc) {
1939bcd197c8SManish Chopra 			DP_NOTICE(p_hwfn,
1940bcd197c8SManish Chopra 				  "WFQ validation failed while configuring min rate\n");
1941bcd197c8SManish Chopra 			break;
1942bcd197c8SManish Chopra 		}
1943bcd197c8SManish Chopra 	}
1944bcd197c8SManish Chopra 
1945bcd197c8SManish Chopra 	if (!rc && use_wfq)
1946bcd197c8SManish Chopra 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1947bcd197c8SManish Chopra 	else
1948bcd197c8SManish Chopra 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
1949bcd197c8SManish Chopra 
1950bcd197c8SManish Chopra 	return rc;
1951bcd197c8SManish Chopra }
1952bcd197c8SManish Chopra 
1953733def6aSYuval Mintz /* Main API for qed clients to configure vport min rate.
1954733def6aSYuval Mintz  * vp_id - vport id within the PF, range [0 - (total_num_vports_per_pf - 1)]
1955733def6aSYuval Mintz  * rate - speed in Mbps to be assigned to the given vport.
1956733def6aSYuval Mintz  */
1957733def6aSYuval Mintz int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
1958733def6aSYuval Mintz {
1959733def6aSYuval Mintz 	int i, rc = -EINVAL;
1960733def6aSYuval Mintz 
1961733def6aSYuval Mintz 	/* Currently not supported; might change in the future */
1962733def6aSYuval Mintz 	if (cdev->num_hwfns > 1) {
1963733def6aSYuval Mintz 		DP_NOTICE(cdev,
1964733def6aSYuval Mintz 			  "WFQ configuration is not supported for this device\n");
1965733def6aSYuval Mintz 		return rc;
1966733def6aSYuval Mintz 	}
1967733def6aSYuval Mintz 
1968733def6aSYuval Mintz 	for_each_hwfn(cdev, i) {
1969733def6aSYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1970733def6aSYuval Mintz 		struct qed_ptt *p_ptt;
1971733def6aSYuval Mintz 
1972733def6aSYuval Mintz 		p_ptt = qed_ptt_acquire(p_hwfn);
1973733def6aSYuval Mintz 		if (!p_ptt)
1974733def6aSYuval Mintz 			return -EBUSY;
1975733def6aSYuval Mintz 
1976733def6aSYuval Mintz 		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
1977733def6aSYuval Mintz 
1978733def6aSYuval Mintz 		if (!rc) {
1979733def6aSYuval Mintz 			qed_ptt_release(p_hwfn, p_ptt);
1980733def6aSYuval Mintz 			return rc;
1981733def6aSYuval Mintz 		}
1982733def6aSYuval Mintz 
1983733def6aSYuval Mintz 		qed_ptt_release(p_hwfn, p_ptt);
1984733def6aSYuval Mintz 	}
1985733def6aSYuval Mintz 
1986733def6aSYuval Mintz 	return rc;
1987733def6aSYuval Mintz }
1988733def6aSYuval Mintz 
1989bcd197c8SManish Chopra /* API to configure WFQ from mcp link change */
1990bcd197c8SManish Chopra void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
1991bcd197c8SManish Chopra {
1992bcd197c8SManish Chopra 	int i;
1993bcd197c8SManish Chopra 
1994bcd197c8SManish Chopra 	for_each_hwfn(cdev, i) {
1995bcd197c8SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1996bcd197c8SManish Chopra 
1997bcd197c8SManish Chopra 		__qed_configure_vp_wfq_on_link_change(p_hwfn,
1998bcd197c8SManish Chopra 						      p_hwfn->p_dpc_ptt,
1999bcd197c8SManish Chopra 						      min_pf_rate);
2000bcd197c8SManish Chopra 	}
2001bcd197c8SManish Chopra }
20024b01e519SManish Chopra 
20034b01e519SManish Chopra int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
20044b01e519SManish Chopra 				     struct qed_ptt *p_ptt,
20054b01e519SManish Chopra 				     struct qed_mcp_link_state *p_link,
20064b01e519SManish Chopra 				     u8 max_bw)
20074b01e519SManish Chopra {
20084b01e519SManish Chopra 	int rc = 0;
20094b01e519SManish Chopra 
20104b01e519SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
20114b01e519SManish Chopra 
20124b01e519SManish Chopra 	if (!p_link->line_speed && (max_bw != 100))
20134b01e519SManish Chopra 		return rc;
20144b01e519SManish Chopra 
20154b01e519SManish Chopra 	p_link->speed = (p_link->line_speed * max_bw) / 100;
20164b01e519SManish Chopra 	p_hwfn->qm_info.pf_rl = p_link->speed;
20174b01e519SManish Chopra 
20184b01e519SManish Chopra 	/* Since the limiter also affects Tx-switched traffic, we don't want it
20194b01e519SManish Chopra 	 * to limit such traffic in case there's no actual limit.
20204b01e519SManish Chopra 	 * In that case, set the limit to an artificially high value.
20214b01e519SManish Chopra 	 */
20224b01e519SManish Chopra 	if (max_bw == 100)
20234b01e519SManish Chopra 		p_hwfn->qm_info.pf_rl = 100000;
20244b01e519SManish Chopra 
20254b01e519SManish Chopra 	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
20264b01e519SManish Chopra 			    p_hwfn->qm_info.pf_rl);
20274b01e519SManish Chopra 
20284b01e519SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
20294b01e519SManish Chopra 		   "Configured MAX bandwidth to be %08x Mb/sec\n",
20304b01e519SManish Chopra 		   p_link->speed);
20314b01e519SManish Chopra 
20324b01e519SManish Chopra 	return rc;
20334b01e519SManish Chopra }
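
/* Illustrative sketch, not part of the driver: the PF rate limiter above is
 * line_speed * max_bw / 100, except that 100% means "no real limit" and is
 * mapped to an artificially high ceiling so Tx-switched traffic is not
 * capped. The 100000 Mb/s value mirrors the one used above; the example_*
 * helper is hypothetical.
 */
static u32 example_pf_rl(u32 line_speed, u8 max_bw)
{
	if (max_bw == 100)
		return 100000;	/* effectively unlimited */

	return (line_speed * max_bw) / 100;
}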
20344b01e519SManish Chopra 
20354b01e519SManish Chopra /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
20364b01e519SManish Chopra int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
20374b01e519SManish Chopra {
20384b01e519SManish Chopra 	int i, rc = -EINVAL;
20394b01e519SManish Chopra 
20404b01e519SManish Chopra 	if (max_bw < 1 || max_bw > 100) {
20414b01e519SManish Chopra 		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
20424b01e519SManish Chopra 		return rc;
20434b01e519SManish Chopra 	}
20444b01e519SManish Chopra 
20454b01e519SManish Chopra 	for_each_hwfn(cdev, i) {
20464b01e519SManish Chopra 		struct qed_hwfn	*p_hwfn = &cdev->hwfns[i];
20474b01e519SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
20484b01e519SManish Chopra 		struct qed_mcp_link_state *p_link;
20494b01e519SManish Chopra 		struct qed_ptt *p_ptt;
20504b01e519SManish Chopra 
20514b01e519SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
20524b01e519SManish Chopra 
20534b01e519SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
20544b01e519SManish Chopra 		if (!p_ptt)
20554b01e519SManish Chopra 			return -EBUSY;
20564b01e519SManish Chopra 
20574b01e519SManish Chopra 		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
20584b01e519SManish Chopra 						      p_link, max_bw);
20594b01e519SManish Chopra 
20604b01e519SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
20614b01e519SManish Chopra 
20624b01e519SManish Chopra 		if (rc)
20634b01e519SManish Chopra 			break;
20644b01e519SManish Chopra 	}
20654b01e519SManish Chopra 
20664b01e519SManish Chopra 	return rc;
20674b01e519SManish Chopra }
2068a64b02d5SManish Chopra 
2069a64b02d5SManish Chopra int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
2070a64b02d5SManish Chopra 				     struct qed_ptt *p_ptt,
2071a64b02d5SManish Chopra 				     struct qed_mcp_link_state *p_link,
2072a64b02d5SManish Chopra 				     u8 min_bw)
2073a64b02d5SManish Chopra {
2074a64b02d5SManish Chopra 	int rc = 0;
2075a64b02d5SManish Chopra 
2076a64b02d5SManish Chopra 	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
2077a64b02d5SManish Chopra 	p_hwfn->qm_info.pf_wfq = min_bw;
2078a64b02d5SManish Chopra 
2079a64b02d5SManish Chopra 	if (!p_link->line_speed)
2080a64b02d5SManish Chopra 		return rc;
2081a64b02d5SManish Chopra 
2082a64b02d5SManish Chopra 	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
2083a64b02d5SManish Chopra 
2084a64b02d5SManish Chopra 	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
2085a64b02d5SManish Chopra 
2086a64b02d5SManish Chopra 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
2087a64b02d5SManish Chopra 		   "Configured MIN bandwidth to be %d Mb/sec\n",
2088a64b02d5SManish Chopra 		   p_link->min_pf_rate);
2089a64b02d5SManish Chopra 
2090a64b02d5SManish Chopra 	return rc;
2091a64b02d5SManish Chopra }
2092a64b02d5SManish Chopra 
2093a64b02d5SManish Chopra /* Main API to configure PF min bandwidth where bw range is [1-100] */
2094a64b02d5SManish Chopra int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
2095a64b02d5SManish Chopra {
2096a64b02d5SManish Chopra 	int i, rc = -EINVAL;
2097a64b02d5SManish Chopra 
2098a64b02d5SManish Chopra 	if (min_bw < 1 || min_bw > 100) {
2099a64b02d5SManish Chopra 		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
2100a64b02d5SManish Chopra 		return rc;
2101a64b02d5SManish Chopra 	}
2102a64b02d5SManish Chopra 
2103a64b02d5SManish Chopra 	for_each_hwfn(cdev, i) {
2104a64b02d5SManish Chopra 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2105a64b02d5SManish Chopra 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
2106a64b02d5SManish Chopra 		struct qed_mcp_link_state *p_link;
2107a64b02d5SManish Chopra 		struct qed_ptt *p_ptt;
2108a64b02d5SManish Chopra 
2109a64b02d5SManish Chopra 		p_link = &p_lead->mcp_info->link_output;
2110a64b02d5SManish Chopra 
2111a64b02d5SManish Chopra 		p_ptt = qed_ptt_acquire(p_hwfn);
2112a64b02d5SManish Chopra 		if (!p_ptt)
2113a64b02d5SManish Chopra 			return -EBUSY;
2114a64b02d5SManish Chopra 
2115a64b02d5SManish Chopra 		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
2116a64b02d5SManish Chopra 						      p_link, min_bw);
2117a64b02d5SManish Chopra 		if (rc) {
2118a64b02d5SManish Chopra 			qed_ptt_release(p_hwfn, p_ptt);
2119a64b02d5SManish Chopra 			return rc;
2120a64b02d5SManish Chopra 		}
2121a64b02d5SManish Chopra 
2122a64b02d5SManish Chopra 		if (p_link->min_pf_rate) {
2123a64b02d5SManish Chopra 			u32 min_rate = p_link->min_pf_rate;
2124a64b02d5SManish Chopra 
2125a64b02d5SManish Chopra 			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
2126a64b02d5SManish Chopra 								   p_ptt,
2127a64b02d5SManish Chopra 								   min_rate);
2128a64b02d5SManish Chopra 		}
2129a64b02d5SManish Chopra 
2130a64b02d5SManish Chopra 		qed_ptt_release(p_hwfn, p_ptt);
2131a64b02d5SManish Chopra 	}
2132a64b02d5SManish Chopra 
2133a64b02d5SManish Chopra 	return rc;
2134a64b02d5SManish Chopra }
2135733def6aSYuval Mintz 
2136733def6aSYuval Mintz void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2137733def6aSYuval Mintz {
2138733def6aSYuval Mintz 	struct qed_mcp_link_state *p_link;
2139733def6aSYuval Mintz 
2140733def6aSYuval Mintz 	p_link = &p_hwfn->mcp_info->link_output;
2141733def6aSYuval Mintz 
2142733def6aSYuval Mintz 	if (p_link->min_pf_rate)
2143733def6aSYuval Mintz 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
2144733def6aSYuval Mintz 					       p_link->min_pf_rate);
2145733def6aSYuval Mintz 
2146733def6aSYuval Mintz 	memset(p_hwfn->qm_info.wfq_data, 0,
2147733def6aSYuval Mintz 	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
2148733def6aSYuval Mintz }
2149