1fe56b9e6SYuval Mintz /* QLogic qed NIC Driver
2fe56b9e6SYuval Mintz  * Copyright (c) 2015 QLogic Corporation
3fe56b9e6SYuval Mintz  *
4fe56b9e6SYuval Mintz  * This software is available under the terms of the GNU General Public License
5fe56b9e6SYuval Mintz  * (GPL) Version 2, available from the file COPYING in the main directory of
6fe56b9e6SYuval Mintz  * this source tree.
7fe56b9e6SYuval Mintz  */
8fe56b9e6SYuval Mintz 
9fe56b9e6SYuval Mintz #include <linux/types.h>
10fe56b9e6SYuval Mintz #include <asm/byteorder.h>
11fe56b9e6SYuval Mintz #include <linux/io.h>
12fe56b9e6SYuval Mintz #include <linux/delay.h>
13fe56b9e6SYuval Mintz #include <linux/dma-mapping.h>
14fe56b9e6SYuval Mintz #include <linux/errno.h>
15fe56b9e6SYuval Mintz #include <linux/kernel.h>
16fe56b9e6SYuval Mintz #include <linux/mutex.h>
17fe56b9e6SYuval Mintz #include <linux/pci.h>
18fe56b9e6SYuval Mintz #include <linux/slab.h>
19fe56b9e6SYuval Mintz #include <linux/string.h>
20fe56b9e6SYuval Mintz #include <linux/etherdevice.h>
21fe56b9e6SYuval Mintz #include <linux/qed/qed_chain.h>
22fe56b9e6SYuval Mintz #include <linux/qed/qed_if.h>
23fe56b9e6SYuval Mintz #include "qed.h"
24fe56b9e6SYuval Mintz #include "qed_cxt.h"
25fe56b9e6SYuval Mintz #include "qed_dev_api.h"
26fe56b9e6SYuval Mintz #include "qed_hsi.h"
27fe56b9e6SYuval Mintz #include "qed_hw.h"
28fe56b9e6SYuval Mintz #include "qed_init_ops.h"
29fe56b9e6SYuval Mintz #include "qed_int.h"
30fe56b9e6SYuval Mintz #include "qed_mcp.h"
31fe56b9e6SYuval Mintz #include "qed_reg_addr.h"
32fe56b9e6SYuval Mintz #include "qed_sp.h"
33fe56b9e6SYuval Mintz 
34fe56b9e6SYuval Mintz /* API common to all protocols */
35fe56b9e6SYuval Mintz void qed_init_dp(struct qed_dev *cdev,
36fe56b9e6SYuval Mintz 		 u32 dp_module, u8 dp_level)
37fe56b9e6SYuval Mintz {
38fe56b9e6SYuval Mintz 	u32 i;
39fe56b9e6SYuval Mintz 
40fe56b9e6SYuval Mintz 	cdev->dp_level = dp_level;
41fe56b9e6SYuval Mintz 	cdev->dp_module = dp_module;
42fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
43fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
44fe56b9e6SYuval Mintz 
45fe56b9e6SYuval Mintz 		p_hwfn->dp_level = dp_level;
46fe56b9e6SYuval Mintz 		p_hwfn->dp_module = dp_module;
47fe56b9e6SYuval Mintz 	}
48fe56b9e6SYuval Mintz }
49fe56b9e6SYuval Mintz 
50fe56b9e6SYuval Mintz void qed_init_struct(struct qed_dev *cdev)
51fe56b9e6SYuval Mintz {
52fe56b9e6SYuval Mintz 	u8 i;
53fe56b9e6SYuval Mintz 
54fe56b9e6SYuval Mintz 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
55fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
56fe56b9e6SYuval Mintz 
57fe56b9e6SYuval Mintz 		p_hwfn->cdev = cdev;
58fe56b9e6SYuval Mintz 		p_hwfn->my_id = i;
59fe56b9e6SYuval Mintz 		p_hwfn->b_active = false;
60fe56b9e6SYuval Mintz 
61fe56b9e6SYuval Mintz 		mutex_init(&p_hwfn->dmae_info.mutex);
62fe56b9e6SYuval Mintz 	}
63fe56b9e6SYuval Mintz 
64fe56b9e6SYuval Mintz 	/* hwfn 0 is always active */
65fe56b9e6SYuval Mintz 	cdev->hwfns[0].b_active = true;
66fe56b9e6SYuval Mintz 
67fe56b9e6SYuval Mintz 	/* set the default cache alignment to 128 */
68fe56b9e6SYuval Mintz 	cdev->cache_shift = 7;
69fe56b9e6SYuval Mintz }
70fe56b9e6SYuval Mintz 
71fe56b9e6SYuval Mintz static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
72fe56b9e6SYuval Mintz {
73fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
74fe56b9e6SYuval Mintz 
75fe56b9e6SYuval Mintz 	kfree(qm_info->qm_pq_params);
76fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = NULL;
77fe56b9e6SYuval Mintz 	kfree(qm_info->qm_vport_params);
78fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = NULL;
79fe56b9e6SYuval Mintz 	kfree(qm_info->qm_port_params);
80fe56b9e6SYuval Mintz 	qm_info->qm_port_params = NULL;
81fe56b9e6SYuval Mintz }
82fe56b9e6SYuval Mintz 
83fe56b9e6SYuval Mintz void qed_resc_free(struct qed_dev *cdev)
84fe56b9e6SYuval Mintz {
85fe56b9e6SYuval Mintz 	int i;
86fe56b9e6SYuval Mintz 
87fe56b9e6SYuval Mintz 	kfree(cdev->fw_data);
88fe56b9e6SYuval Mintz 	cdev->fw_data = NULL;
89fe56b9e6SYuval Mintz 
90fe56b9e6SYuval Mintz 	kfree(cdev->reset_stats);
91fe56b9e6SYuval Mintz 
92fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
93fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
94fe56b9e6SYuval Mintz 
9525c089d7SYuval Mintz 		kfree(p_hwfn->p_tx_cids);
9625c089d7SYuval Mintz 		p_hwfn->p_tx_cids = NULL;
9725c089d7SYuval Mintz 		kfree(p_hwfn->p_rx_cids);
9825c089d7SYuval Mintz 		p_hwfn->p_rx_cids = NULL;
9925c089d7SYuval Mintz 	}
10025c089d7SYuval Mintz 
10125c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
10225c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
10325c089d7SYuval Mintz 
104fe56b9e6SYuval Mintz 		qed_cxt_mngr_free(p_hwfn);
105fe56b9e6SYuval Mintz 		qed_qm_info_free(p_hwfn);
106fe56b9e6SYuval Mintz 		qed_spq_free(p_hwfn);
107fe56b9e6SYuval Mintz 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
108fe56b9e6SYuval Mintz 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
109fe56b9e6SYuval Mintz 		qed_int_free(p_hwfn);
110fe56b9e6SYuval Mintz 		qed_dmae_info_free(p_hwfn);
111fe56b9e6SYuval Mintz 	}
112fe56b9e6SYuval Mintz }
113fe56b9e6SYuval Mintz 
114fe56b9e6SYuval Mintz static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
115fe56b9e6SYuval Mintz {
116fe56b9e6SYuval Mintz 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
117fe56b9e6SYuval Mintz 	struct init_qm_port_params *p_qm_port;
118fe56b9e6SYuval Mintz 	u8 num_vports, i, vport_id, num_ports;
119fe56b9e6SYuval Mintz 	u16 num_pqs, multi_cos_tcs = 1;
120fe56b9e6SYuval Mintz 
121fe56b9e6SYuval Mintz 	memset(qm_info, 0, sizeof(*qm_info));
122fe56b9e6SYuval Mintz 
123fe56b9e6SYuval Mintz 	num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
124fe56b9e6SYuval Mintz 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
125fe56b9e6SYuval Mintz 
126fe56b9e6SYuval Mintz 	/* Sanity checking that setup requires legal number of resources */
127fe56b9e6SYuval Mintz 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
128fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
129fe56b9e6SYuval Mintz 		       "Need too many Physical queues - 0x%04x when only %04x are available\n",
130fe56b9e6SYuval Mintz 		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
131fe56b9e6SYuval Mintz 		return -EINVAL;
132fe56b9e6SYuval Mintz 	}
133fe56b9e6SYuval Mintz 
134fe56b9e6SYuval Mintz 	/* PQs will be arranged as follows: First per-TC PQ then pure-LB quete.
135fe56b9e6SYuval Mintz 	 */
136fe56b9e6SYuval Mintz 	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
137fe56b9e6SYuval Mintz 					num_pqs, GFP_ATOMIC);
138fe56b9e6SYuval Mintz 	if (!qm_info->qm_pq_params)
139fe56b9e6SYuval Mintz 		goto alloc_err;
140fe56b9e6SYuval Mintz 
141fe56b9e6SYuval Mintz 	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
142fe56b9e6SYuval Mintz 					   num_vports, GFP_ATOMIC);
143fe56b9e6SYuval Mintz 	if (!qm_info->qm_vport_params)
144fe56b9e6SYuval Mintz 		goto alloc_err;
145fe56b9e6SYuval Mintz 
146fe56b9e6SYuval Mintz 	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
147fe56b9e6SYuval Mintz 					  MAX_NUM_PORTS, GFP_ATOMIC);
148fe56b9e6SYuval Mintz 	if (!qm_info->qm_port_params)
149fe56b9e6SYuval Mintz 		goto alloc_err;
150fe56b9e6SYuval Mintz 
151fe56b9e6SYuval Mintz 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
152fe56b9e6SYuval Mintz 
153fe56b9e6SYuval Mintz 	/* First init per-TC PQs */
154fe56b9e6SYuval Mintz 	for (i = 0; i < multi_cos_tcs; i++) {
155fe56b9e6SYuval Mintz 		struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
156fe56b9e6SYuval Mintz 
157fe56b9e6SYuval Mintz 		params->vport_id = vport_id;
158fe56b9e6SYuval Mintz 		params->tc_id = p_hwfn->hw_info.non_offload_tc;
159fe56b9e6SYuval Mintz 		params->wrr_group = 1;
160fe56b9e6SYuval Mintz 	}
161fe56b9e6SYuval Mintz 
162fe56b9e6SYuval Mintz 	/* Then init pure-LB PQ */
163fe56b9e6SYuval Mintz 	qm_info->pure_lb_pq = i;
164fe56b9e6SYuval Mintz 	qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
165fe56b9e6SYuval Mintz 	qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
166fe56b9e6SYuval Mintz 	qm_info->qm_pq_params[i].wrr_group = 1;
167fe56b9e6SYuval Mintz 	i++;
168fe56b9e6SYuval Mintz 
169fe56b9e6SYuval Mintz 	qm_info->offload_pq = 0;
170fe56b9e6SYuval Mintz 	qm_info->num_pqs = num_pqs;
171fe56b9e6SYuval Mintz 	qm_info->num_vports = num_vports;
172fe56b9e6SYuval Mintz 
173fe56b9e6SYuval Mintz 	/* Initialize qm port parameters */
174fe56b9e6SYuval Mintz 	num_ports = p_hwfn->cdev->num_ports_in_engines;
175fe56b9e6SYuval Mintz 	for (i = 0; i < num_ports; i++) {
176fe56b9e6SYuval Mintz 		p_qm_port = &qm_info->qm_port_params[i];
177fe56b9e6SYuval Mintz 		p_qm_port->active = 1;
178fe56b9e6SYuval Mintz 		p_qm_port->num_active_phys_tcs = 4;
179fe56b9e6SYuval Mintz 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
180fe56b9e6SYuval Mintz 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
181fe56b9e6SYuval Mintz 	}
182fe56b9e6SYuval Mintz 
183fe56b9e6SYuval Mintz 	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
184fe56b9e6SYuval Mintz 
185fe56b9e6SYuval Mintz 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
186fe56b9e6SYuval Mintz 
187fe56b9e6SYuval Mintz 	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
188fe56b9e6SYuval Mintz 
189fe56b9e6SYuval Mintz 	qm_info->pf_wfq = 0;
190fe56b9e6SYuval Mintz 	qm_info->pf_rl = 0;
191fe56b9e6SYuval Mintz 	qm_info->vport_rl_en = 1;
192fe56b9e6SYuval Mintz 
193fe56b9e6SYuval Mintz 	return 0;
194fe56b9e6SYuval Mintz 
195fe56b9e6SYuval Mintz alloc_err:
196fe56b9e6SYuval Mintz 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
197fe56b9e6SYuval Mintz 	kfree(qm_info->qm_pq_params);
198fe56b9e6SYuval Mintz 	kfree(qm_info->qm_vport_params);
199fe56b9e6SYuval Mintz 	kfree(qm_info->qm_port_params);
200fe56b9e6SYuval Mintz 
201fe56b9e6SYuval Mintz 	return -ENOMEM;
202fe56b9e6SYuval Mintz }
203fe56b9e6SYuval Mintz 
204fe56b9e6SYuval Mintz int qed_resc_alloc(struct qed_dev *cdev)
205fe56b9e6SYuval Mintz {
206fe56b9e6SYuval Mintz 	struct qed_consq *p_consq;
207fe56b9e6SYuval Mintz 	struct qed_eq *p_eq;
208fe56b9e6SYuval Mintz 	int i, rc = 0;
209fe56b9e6SYuval Mintz 
210fe56b9e6SYuval Mintz 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
211fe56b9e6SYuval Mintz 	if (!cdev->fw_data)
212fe56b9e6SYuval Mintz 		return -ENOMEM;
213fe56b9e6SYuval Mintz 
21425c089d7SYuval Mintz 	/* Allocate Memory for the Queue->CID mapping */
21525c089d7SYuval Mintz 	for_each_hwfn(cdev, i) {
21625c089d7SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
21725c089d7SYuval Mintz 		int tx_size = sizeof(struct qed_hw_cid_data) *
21825c089d7SYuval Mintz 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
21925c089d7SYuval Mintz 		int rx_size = sizeof(struct qed_hw_cid_data) *
22025c089d7SYuval Mintz 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
22125c089d7SYuval Mintz 
22225c089d7SYuval Mintz 		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
22325c089d7SYuval Mintz 		if (!p_hwfn->p_tx_cids) {
22425c089d7SYuval Mintz 			DP_NOTICE(p_hwfn,
22525c089d7SYuval Mintz 				  "Failed to allocate memory for Tx Cids\n");
22625c089d7SYuval Mintz 			goto alloc_err;
22725c089d7SYuval Mintz 		}
22825c089d7SYuval Mintz 
22925c089d7SYuval Mintz 		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
23025c089d7SYuval Mintz 		if (!p_hwfn->p_rx_cids) {
23125c089d7SYuval Mintz 			DP_NOTICE(p_hwfn,
23225c089d7SYuval Mintz 				  "Failed to allocate memory for Rx Cids\n");
23325c089d7SYuval Mintz 			goto alloc_err;
23425c089d7SYuval Mintz 		}
23525c089d7SYuval Mintz 	}
23625c089d7SYuval Mintz 
237fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
238fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
239fe56b9e6SYuval Mintz 
240fe56b9e6SYuval Mintz 		/* First allocate the context manager structure */
241fe56b9e6SYuval Mintz 		rc = qed_cxt_mngr_alloc(p_hwfn);
242fe56b9e6SYuval Mintz 		if (rc)
243fe56b9e6SYuval Mintz 			goto alloc_err;
244fe56b9e6SYuval Mintz 
245fe56b9e6SYuval Mintz 		/* Set the HW cid/tid numbers (in the contest manager)
246fe56b9e6SYuval Mintz 		 * Must be done prior to any further computations.
247fe56b9e6SYuval Mintz 		 */
248fe56b9e6SYuval Mintz 		rc = qed_cxt_set_pf_params(p_hwfn);
249fe56b9e6SYuval Mintz 		if (rc)
250fe56b9e6SYuval Mintz 			goto alloc_err;
251fe56b9e6SYuval Mintz 
252fe56b9e6SYuval Mintz 		/* Prepare and process QM requirements */
253fe56b9e6SYuval Mintz 		rc = qed_init_qm_info(p_hwfn);
254fe56b9e6SYuval Mintz 		if (rc)
255fe56b9e6SYuval Mintz 			goto alloc_err;
256fe56b9e6SYuval Mintz 
257fe56b9e6SYuval Mintz 		/* Compute the ILT client partition */
258fe56b9e6SYuval Mintz 		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
259fe56b9e6SYuval Mintz 		if (rc)
260fe56b9e6SYuval Mintz 			goto alloc_err;
261fe56b9e6SYuval Mintz 
262fe56b9e6SYuval Mintz 		/* CID map / ILT shadow table / T2
263fe56b9e6SYuval Mintz 		 * The talbes sizes are determined by the computations above
264fe56b9e6SYuval Mintz 		 */
265fe56b9e6SYuval Mintz 		rc = qed_cxt_tables_alloc(p_hwfn);
266fe56b9e6SYuval Mintz 		if (rc)
267fe56b9e6SYuval Mintz 			goto alloc_err;
268fe56b9e6SYuval Mintz 
269fe56b9e6SYuval Mintz 		/* SPQ, must follow ILT because initializes SPQ context */
270fe56b9e6SYuval Mintz 		rc = qed_spq_alloc(p_hwfn);
271fe56b9e6SYuval Mintz 		if (rc)
272fe56b9e6SYuval Mintz 			goto alloc_err;
273fe56b9e6SYuval Mintz 
274fe56b9e6SYuval Mintz 		/* SP status block allocation */
275fe56b9e6SYuval Mintz 		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
276fe56b9e6SYuval Mintz 							 RESERVED_PTT_DPC);
277fe56b9e6SYuval Mintz 
278fe56b9e6SYuval Mintz 		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
279fe56b9e6SYuval Mintz 		if (rc)
280fe56b9e6SYuval Mintz 			goto alloc_err;
281fe56b9e6SYuval Mintz 
282fe56b9e6SYuval Mintz 		/* EQ */
283fe56b9e6SYuval Mintz 		p_eq = qed_eq_alloc(p_hwfn, 256);
284fe56b9e6SYuval Mintz 
285fe56b9e6SYuval Mintz 		if (!p_eq)
286fe56b9e6SYuval Mintz 			goto alloc_err;
287fe56b9e6SYuval Mintz 		p_hwfn->p_eq = p_eq;
288fe56b9e6SYuval Mintz 
289fe56b9e6SYuval Mintz 		p_consq = qed_consq_alloc(p_hwfn);
290fe56b9e6SYuval Mintz 		if (!p_consq)
291fe56b9e6SYuval Mintz 			goto alloc_err;
292fe56b9e6SYuval Mintz 		p_hwfn->p_consq = p_consq;
293fe56b9e6SYuval Mintz 
294fe56b9e6SYuval Mintz 		/* DMA info initialization */
295fe56b9e6SYuval Mintz 		rc = qed_dmae_info_alloc(p_hwfn);
296fe56b9e6SYuval Mintz 		if (rc) {
297fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
298fe56b9e6SYuval Mintz 				  "Failed to allocate memory for dmae_info structure\n");
299fe56b9e6SYuval Mintz 			goto alloc_err;
300fe56b9e6SYuval Mintz 		}
301fe56b9e6SYuval Mintz 	}
302fe56b9e6SYuval Mintz 
303fe56b9e6SYuval Mintz 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
304fe56b9e6SYuval Mintz 	if (!cdev->reset_stats) {
305fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
306fe56b9e6SYuval Mintz 		goto alloc_err;
307fe56b9e6SYuval Mintz 	}
308fe56b9e6SYuval Mintz 
309fe56b9e6SYuval Mintz 	return 0;
310fe56b9e6SYuval Mintz 
311fe56b9e6SYuval Mintz alloc_err:
312fe56b9e6SYuval Mintz 	qed_resc_free(cdev);
313fe56b9e6SYuval Mintz 	return rc;
314fe56b9e6SYuval Mintz }
315fe56b9e6SYuval Mintz 
316fe56b9e6SYuval Mintz void qed_resc_setup(struct qed_dev *cdev)
317fe56b9e6SYuval Mintz {
318fe56b9e6SYuval Mintz 	int i;
319fe56b9e6SYuval Mintz 
320fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
321fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
322fe56b9e6SYuval Mintz 
323fe56b9e6SYuval Mintz 		qed_cxt_mngr_setup(p_hwfn);
324fe56b9e6SYuval Mintz 		qed_spq_setup(p_hwfn);
325fe56b9e6SYuval Mintz 		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
326fe56b9e6SYuval Mintz 		qed_consq_setup(p_hwfn, p_hwfn->p_consq);
327fe56b9e6SYuval Mintz 
328fe56b9e6SYuval Mintz 		/* Read shadow of current MFW mailbox */
329fe56b9e6SYuval Mintz 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
330fe56b9e6SYuval Mintz 		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
331fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_cur,
332fe56b9e6SYuval Mintz 		       p_hwfn->mcp_info->mfw_mb_length);
333fe56b9e6SYuval Mintz 
334fe56b9e6SYuval Mintz 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
335fe56b9e6SYuval Mintz 	}
336fe56b9e6SYuval Mintz }
337fe56b9e6SYuval Mintz 
338fe56b9e6SYuval Mintz #define FINAL_CLEANUP_CMD_OFFSET        (0)
339fe56b9e6SYuval Mintz #define FINAL_CLEANUP_CMD (0x1)
340fe56b9e6SYuval Mintz #define FINAL_CLEANUP_VALID_OFFSET      (6)
341fe56b9e6SYuval Mintz #define FINAL_CLEANUP_VFPF_ID_SHIFT     (7)
342fe56b9e6SYuval Mintz #define FINAL_CLEANUP_COMP (0x2)
343fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_CNT          (100)
344fe56b9e6SYuval Mintz #define FINAL_CLEANUP_POLL_TIME         (10)
345fe56b9e6SYuval Mintz int qed_final_cleanup(struct qed_hwfn *p_hwfn,
346fe56b9e6SYuval Mintz 		      struct qed_ptt *p_ptt,
347fe56b9e6SYuval Mintz 		      u16 id)
348fe56b9e6SYuval Mintz {
349fe56b9e6SYuval Mintz 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
350fe56b9e6SYuval Mintz 	int rc = -EBUSY;
351fe56b9e6SYuval Mintz 
352fe56b9e6SYuval Mintz 	addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET;
353fe56b9e6SYuval Mintz 
354fe56b9e6SYuval Mintz 	command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET;
355fe56b9e6SYuval Mintz 	command |= 1 << FINAL_CLEANUP_VALID_OFFSET;
356fe56b9e6SYuval Mintz 	command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT;
357fe56b9e6SYuval Mintz 	command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT;
358fe56b9e6SYuval Mintz 
359fe56b9e6SYuval Mintz 	/* Make sure notification is not set before initiating final cleanup */
360fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr)) {
361fe56b9e6SYuval Mintz 		DP_NOTICE(
362fe56b9e6SYuval Mintz 			p_hwfn,
363fe56b9e6SYuval Mintz 			"Unexpected; Found final cleanup notification before initiating final cleanup\n");
364fe56b9e6SYuval Mintz 		REG_WR(p_hwfn, addr, 0);
365fe56b9e6SYuval Mintz 	}
366fe56b9e6SYuval Mintz 
367fe56b9e6SYuval Mintz 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
368fe56b9e6SYuval Mintz 		   "Sending final cleanup for PFVF[%d] [Command %08x\n]",
369fe56b9e6SYuval Mintz 		   id, command);
370fe56b9e6SYuval Mintz 
371fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
372fe56b9e6SYuval Mintz 
373fe56b9e6SYuval Mintz 	/* Poll until completion */
374fe56b9e6SYuval Mintz 	while (!REG_RD(p_hwfn, addr) && count--)
375fe56b9e6SYuval Mintz 		msleep(FINAL_CLEANUP_POLL_TIME);
376fe56b9e6SYuval Mintz 
377fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, addr))
378fe56b9e6SYuval Mintz 		rc = 0;
379fe56b9e6SYuval Mintz 	else
380fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
381fe56b9e6SYuval Mintz 			  "Failed to receive FW final cleanup notification\n");
382fe56b9e6SYuval Mintz 
383fe56b9e6SYuval Mintz 	/* Cleanup afterwards */
384fe56b9e6SYuval Mintz 	REG_WR(p_hwfn, addr, 0);
385fe56b9e6SYuval Mintz 
386fe56b9e6SYuval Mintz 	return rc;
387fe56b9e6SYuval Mintz }
388fe56b9e6SYuval Mintz 
389fe56b9e6SYuval Mintz static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
390fe56b9e6SYuval Mintz {
391fe56b9e6SYuval Mintz 	int hw_mode = 0;
392fe56b9e6SYuval Mintz 
393fe56b9e6SYuval Mintz 	hw_mode = (1 << MODE_BB_A0);
394fe56b9e6SYuval Mintz 
395fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->num_ports_in_engines) {
396fe56b9e6SYuval Mintz 	case 1:
397fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
398fe56b9e6SYuval Mintz 		break;
399fe56b9e6SYuval Mintz 	case 2:
400fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
401fe56b9e6SYuval Mintz 		break;
402fe56b9e6SYuval Mintz 	case 4:
403fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
404fe56b9e6SYuval Mintz 		break;
405fe56b9e6SYuval Mintz 	default:
406fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
407fe56b9e6SYuval Mintz 			  p_hwfn->cdev->num_ports_in_engines);
408fe56b9e6SYuval Mintz 		return;
409fe56b9e6SYuval Mintz 	}
410fe56b9e6SYuval Mintz 
411fe56b9e6SYuval Mintz 	switch (p_hwfn->cdev->mf_mode) {
412fe56b9e6SYuval Mintz 	case SF:
413fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_SF;
414fe56b9e6SYuval Mintz 		break;
415fe56b9e6SYuval Mintz 	case MF_OVLAN:
416fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SD;
417fe56b9e6SYuval Mintz 		break;
418fe56b9e6SYuval Mintz 	case MF_NPAR:
419fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_MF_SI;
420fe56b9e6SYuval Mintz 		break;
421fe56b9e6SYuval Mintz 	default:
422fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n");
423fe56b9e6SYuval Mintz 		hw_mode |= 1 << MODE_SF;
424fe56b9e6SYuval Mintz 	}
425fe56b9e6SYuval Mintz 
426fe56b9e6SYuval Mintz 	hw_mode |= 1 << MODE_ASIC;
427fe56b9e6SYuval Mintz 
428fe56b9e6SYuval Mintz 	p_hwfn->hw_info.hw_mode = hw_mode;
429fe56b9e6SYuval Mintz }
430fe56b9e6SYuval Mintz 
431fe56b9e6SYuval Mintz /* Init run time data for all PFs on an engine. */
432fe56b9e6SYuval Mintz static void qed_init_cau_rt_data(struct qed_dev *cdev)
433fe56b9e6SYuval Mintz {
434fe56b9e6SYuval Mintz 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
435fe56b9e6SYuval Mintz 	int i, sb_id;
436fe56b9e6SYuval Mintz 
437fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
438fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
439fe56b9e6SYuval Mintz 		struct qed_igu_info *p_igu_info;
440fe56b9e6SYuval Mintz 		struct qed_igu_block *p_block;
441fe56b9e6SYuval Mintz 		struct cau_sb_entry sb_entry;
442fe56b9e6SYuval Mintz 
443fe56b9e6SYuval Mintz 		p_igu_info = p_hwfn->hw_info.p_igu_info;
444fe56b9e6SYuval Mintz 
445fe56b9e6SYuval Mintz 		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
446fe56b9e6SYuval Mintz 		     sb_id++) {
447fe56b9e6SYuval Mintz 			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
448fe56b9e6SYuval Mintz 			if (!p_block->is_pf)
449fe56b9e6SYuval Mintz 				continue;
450fe56b9e6SYuval Mintz 
451fe56b9e6SYuval Mintz 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
452fe56b9e6SYuval Mintz 					      p_block->function_id,
453fe56b9e6SYuval Mintz 					      0, 0);
454fe56b9e6SYuval Mintz 			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
455fe56b9e6SYuval Mintz 					 sb_entry);
456fe56b9e6SYuval Mintz 		}
457fe56b9e6SYuval Mintz 	}
458fe56b9e6SYuval Mintz }
459fe56b9e6SYuval Mintz 
/* Engine-level (common) HW init, executed once by the first function
 * loaded on an engine: CAU runtime data, GTT windows, QM common
 * configuration, NIG gating and the ENGINE phase of the init-tool.
 *
 * @p_hwfn:  HW-function performing the engine init
 * @p_ptt:   PTT window used for register access
 * @hw_mode: mode bits computed by qed_calc_hw_mode()
 *
 * Return: 0 on success, error code from qed_init_run() otherwise.
 */
static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      int hw_mode)
{
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_common_rt_init_params params;
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc = 0;

	qed_init_cau_rt_data(cdev);

	/* Program GTT windows */
	qed_gtt_init(p_hwfn);

	/* Enable PF rate-limiting / weighted-fair-queueing only if the
	 * MFW reported corresponding bandwidth limits for this function.
	 */
	if (p_hwfn->mcp_info) {
		if (p_hwfn->mcp_info->func_info.bandwidth_max)
			qm_info->pf_rl_en = 1;
		if (p_hwfn->mcp_info->func_info.bandwidth_min)
			qm_info->pf_wfq_en = 1;
	}

	/* Translate the QM info gathered in qed_init_qm_info() into the
	 * parameter struct consumed by the common QM runtime init.
	 */
	memset(&params, 0, sizeof(params));
	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.pf_rl_en = qm_info->pf_rl_en;
	params.pf_wfq_en = qm_info->pf_wfq_en;
	params.vport_rl_en = qm_info->vport_rl_en;
	params.vport_wfq_en = qm_info->vport_wfq_en;
	params.port_params = qm_info->qm_port_params;

	qed_qm_common_rt_init(p_hwfn, &params);

	qed_cxt_hw_init_common(p_hwfn);

	/* Close gate from NIG to BRB/Storm; By default they are open, but
	 * we close them to prevent NIG from passing data to reset blocks.
	 * Should have been done in the ENGINE phase, but init-tool lacks
	 * proper port-pretend capabilities.
	 */
	/* Close the gates for this port, then pretend to be the paired
	 * port (port_id ^ 1) to close its gates too, and stop pretending.
	 */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
	qed_port_unpretend(p_hwfn, p_ptt);

	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
	if (rc != 0)
		return rc;

	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);

	/* Disable relaxed ordering in the PCI config space */
	/* NOTE(review): 0x20b4 / bit 0x10 are raw PCI config offsets with
	 * no named constant in view - presumably Device Control's Relaxed
	 * Ordering Enable; confirm against the chip's PCIe capability map.
	 */
	qed_wr(p_hwfn, p_ptt, 0x20b4,
	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);

	return rc;
}
519fe56b9e6SYuval Mintz 
520fe56b9e6SYuval Mintz static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
521fe56b9e6SYuval Mintz 			    struct qed_ptt *p_ptt,
522fe56b9e6SYuval Mintz 			    int hw_mode)
523fe56b9e6SYuval Mintz {
524fe56b9e6SYuval Mintz 	int rc = 0;
525fe56b9e6SYuval Mintz 
526fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
527fe56b9e6SYuval Mintz 			  hw_mode);
528fe56b9e6SYuval Mintz 	return rc;
529fe56b9e6SYuval Mintz }
530fe56b9e6SYuval Mintz 
531fe56b9e6SYuval Mintz static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
532fe56b9e6SYuval Mintz 			  struct qed_ptt *p_ptt,
533fe56b9e6SYuval Mintz 			  int hw_mode,
534fe56b9e6SYuval Mintz 			  bool b_hw_start,
535fe56b9e6SYuval Mintz 			  enum qed_int_mode int_mode,
536fe56b9e6SYuval Mintz 			  bool allow_npar_tx_switch)
537fe56b9e6SYuval Mintz {
538fe56b9e6SYuval Mintz 	u8 rel_pf_id = p_hwfn->rel_pf_id;
539fe56b9e6SYuval Mintz 	int rc = 0;
540fe56b9e6SYuval Mintz 
541fe56b9e6SYuval Mintz 	if (p_hwfn->mcp_info) {
542fe56b9e6SYuval Mintz 		struct qed_mcp_function_info *p_info;
543fe56b9e6SYuval Mintz 
544fe56b9e6SYuval Mintz 		p_info = &p_hwfn->mcp_info->func_info;
545fe56b9e6SYuval Mintz 		if (p_info->bandwidth_min)
546fe56b9e6SYuval Mintz 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
547fe56b9e6SYuval Mintz 
548fe56b9e6SYuval Mintz 		/* Update rate limit once we'll actually have a link */
549fe56b9e6SYuval Mintz 		p_hwfn->qm_info.pf_rl = 100;
550fe56b9e6SYuval Mintz 	}
551fe56b9e6SYuval Mintz 
552fe56b9e6SYuval Mintz 	qed_cxt_hw_init_pf(p_hwfn);
553fe56b9e6SYuval Mintz 
554fe56b9e6SYuval Mintz 	qed_int_igu_init_rt(p_hwfn);
555fe56b9e6SYuval Mintz 
556fe56b9e6SYuval Mintz 	/* Set VLAN in NIG if needed */
557fe56b9e6SYuval Mintz 	if (hw_mode & (1 << MODE_MF_SD)) {
558fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
559fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
560fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
561fe56b9e6SYuval Mintz 			     p_hwfn->hw_info.ovlan);
562fe56b9e6SYuval Mintz 	}
563fe56b9e6SYuval Mintz 
564fe56b9e6SYuval Mintz 	/* Enable classification by MAC if needed */
565fe56b9e6SYuval Mintz 	if (hw_mode & MODE_MF_SI) {
566fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
567fe56b9e6SYuval Mintz 			   "Configuring TAGMAC_CLS_TYPE\n");
568fe56b9e6SYuval Mintz 		STORE_RT_REG(p_hwfn,
569fe56b9e6SYuval Mintz 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
570fe56b9e6SYuval Mintz 	}
571fe56b9e6SYuval Mintz 
572fe56b9e6SYuval Mintz 	/* Protocl Configuration  */
573fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
574fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
575fe56b9e6SYuval Mintz 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
576fe56b9e6SYuval Mintz 
577fe56b9e6SYuval Mintz 	/* Cleanup chip from previous driver if such remains exist */
578fe56b9e6SYuval Mintz 	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
579fe56b9e6SYuval Mintz 	if (rc != 0)
580fe56b9e6SYuval Mintz 		return rc;
581fe56b9e6SYuval Mintz 
582fe56b9e6SYuval Mintz 	/* PF Init sequence */
583fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
584fe56b9e6SYuval Mintz 	if (rc)
585fe56b9e6SYuval Mintz 		return rc;
586fe56b9e6SYuval Mintz 
587fe56b9e6SYuval Mintz 	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
588fe56b9e6SYuval Mintz 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
589fe56b9e6SYuval Mintz 	if (rc)
590fe56b9e6SYuval Mintz 		return rc;
591fe56b9e6SYuval Mintz 
592fe56b9e6SYuval Mintz 	/* Pure runtime initializations - directly to the HW  */
593fe56b9e6SYuval Mintz 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
594fe56b9e6SYuval Mintz 
595fe56b9e6SYuval Mintz 	if (b_hw_start) {
596fe56b9e6SYuval Mintz 		/* enable interrupts */
597fe56b9e6SYuval Mintz 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
598fe56b9e6SYuval Mintz 
599fe56b9e6SYuval Mintz 		/* send function start command */
600fe56b9e6SYuval Mintz 		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
601fe56b9e6SYuval Mintz 		if (rc)
602fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
603fe56b9e6SYuval Mintz 	}
604fe56b9e6SYuval Mintz 	return rc;
605fe56b9e6SYuval Mintz }
606fe56b9e6SYuval Mintz 
607fe56b9e6SYuval Mintz static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
608fe56b9e6SYuval Mintz 			       struct qed_ptt *p_ptt,
609fe56b9e6SYuval Mintz 			       u8 enable)
610fe56b9e6SYuval Mintz {
611fe56b9e6SYuval Mintz 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
612fe56b9e6SYuval Mintz 
613fe56b9e6SYuval Mintz 	/* Change PF in PXP */
614fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_ptt,
615fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
616fe56b9e6SYuval Mintz 
617fe56b9e6SYuval Mintz 	/* wait until value is set - try for 1 second every 50us */
618fe56b9e6SYuval Mintz 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
619fe56b9e6SYuval Mintz 		val = qed_rd(p_hwfn, p_ptt,
620fe56b9e6SYuval Mintz 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
621fe56b9e6SYuval Mintz 		if (val == set_val)
622fe56b9e6SYuval Mintz 			break;
623fe56b9e6SYuval Mintz 
624fe56b9e6SYuval Mintz 		usleep_range(50, 60);
625fe56b9e6SYuval Mintz 	}
626fe56b9e6SYuval Mintz 
627fe56b9e6SYuval Mintz 	if (val != set_val) {
628fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn,
629fe56b9e6SYuval Mintz 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
630fe56b9e6SYuval Mintz 		return -EAGAIN;
631fe56b9e6SYuval Mintz 	}
632fe56b9e6SYuval Mintz 
633fe56b9e6SYuval Mintz 	return 0;
634fe56b9e6SYuval Mintz }
635fe56b9e6SYuval Mintz 
636fe56b9e6SYuval Mintz static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
637fe56b9e6SYuval Mintz 				struct qed_ptt *p_main_ptt)
638fe56b9e6SYuval Mintz {
639fe56b9e6SYuval Mintz 	/* Read shadow of current MFW mailbox */
640fe56b9e6SYuval Mintz 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
641fe56b9e6SYuval Mintz 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
642fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_cur,
643fe56b9e6SYuval Mintz 	       p_hwfn->mcp_info->mfw_mb_length);
644fe56b9e6SYuval Mintz }
645fe56b9e6SYuval Mintz 
646fe56b9e6SYuval Mintz int qed_hw_init(struct qed_dev *cdev,
647fe56b9e6SYuval Mintz 		bool b_hw_start,
648fe56b9e6SYuval Mintz 		enum qed_int_mode int_mode,
649fe56b9e6SYuval Mintz 		bool allow_npar_tx_switch,
650fe56b9e6SYuval Mintz 		const u8 *bin_fw_data)
651fe56b9e6SYuval Mintz {
652fe56b9e6SYuval Mintz 	u32 load_code, param;
653fe56b9e6SYuval Mintz 	int rc, mfw_rc, i;
654fe56b9e6SYuval Mintz 
655fe56b9e6SYuval Mintz 	rc = qed_init_fw_data(cdev, bin_fw_data);
656fe56b9e6SYuval Mintz 	if (rc != 0)
657fe56b9e6SYuval Mintz 		return rc;
658fe56b9e6SYuval Mintz 
659fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
660fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
661fe56b9e6SYuval Mintz 
662fe56b9e6SYuval Mintz 		/* Enable DMAE in PXP */
663fe56b9e6SYuval Mintz 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
664fe56b9e6SYuval Mintz 
665fe56b9e6SYuval Mintz 		qed_calc_hw_mode(p_hwfn);
666fe56b9e6SYuval Mintz 
667fe56b9e6SYuval Mintz 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
668fe56b9e6SYuval Mintz 				      &load_code);
669fe56b9e6SYuval Mintz 		if (rc) {
670fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
671fe56b9e6SYuval Mintz 			return rc;
672fe56b9e6SYuval Mintz 		}
673fe56b9e6SYuval Mintz 
674fe56b9e6SYuval Mintz 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
675fe56b9e6SYuval Mintz 
676fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
677fe56b9e6SYuval Mintz 			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
678fe56b9e6SYuval Mintz 			   rc, load_code);
679fe56b9e6SYuval Mintz 
680fe56b9e6SYuval Mintz 		p_hwfn->first_on_engine = (load_code ==
681fe56b9e6SYuval Mintz 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
682fe56b9e6SYuval Mintz 
683fe56b9e6SYuval Mintz 		switch (load_code) {
684fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
685fe56b9e6SYuval Mintz 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
686fe56b9e6SYuval Mintz 						p_hwfn->hw_info.hw_mode);
687fe56b9e6SYuval Mintz 			if (rc)
688fe56b9e6SYuval Mintz 				break;
689fe56b9e6SYuval Mintz 		/* Fall into */
690fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_PORT:
691fe56b9e6SYuval Mintz 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
692fe56b9e6SYuval Mintz 					      p_hwfn->hw_info.hw_mode);
693fe56b9e6SYuval Mintz 			if (rc)
694fe56b9e6SYuval Mintz 				break;
695fe56b9e6SYuval Mintz 
696fe56b9e6SYuval Mintz 		/* Fall into */
697fe56b9e6SYuval Mintz 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
698fe56b9e6SYuval Mintz 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
699fe56b9e6SYuval Mintz 					    p_hwfn->hw_info.hw_mode,
700fe56b9e6SYuval Mintz 					    b_hw_start, int_mode,
701fe56b9e6SYuval Mintz 					    allow_npar_tx_switch);
702fe56b9e6SYuval Mintz 			break;
703fe56b9e6SYuval Mintz 		default:
704fe56b9e6SYuval Mintz 			rc = -EINVAL;
705fe56b9e6SYuval Mintz 			break;
706fe56b9e6SYuval Mintz 		}
707fe56b9e6SYuval Mintz 
708fe56b9e6SYuval Mintz 		if (rc)
709fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
710fe56b9e6SYuval Mintz 				  "init phase failed for loadcode 0x%x (rc %d)\n",
711fe56b9e6SYuval Mintz 				   load_code, rc);
712fe56b9e6SYuval Mintz 
713fe56b9e6SYuval Mintz 		/* ACK mfw regardless of success or failure of initialization */
714fe56b9e6SYuval Mintz 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
715fe56b9e6SYuval Mintz 				     DRV_MSG_CODE_LOAD_DONE,
716fe56b9e6SYuval Mintz 				     0, &load_code, &param);
717fe56b9e6SYuval Mintz 		if (rc)
718fe56b9e6SYuval Mintz 			return rc;
719fe56b9e6SYuval Mintz 		if (mfw_rc) {
720fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
721fe56b9e6SYuval Mintz 			return mfw_rc;
722fe56b9e6SYuval Mintz 		}
723fe56b9e6SYuval Mintz 
724fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = true;
725fe56b9e6SYuval Mintz 	}
726fe56b9e6SYuval Mintz 
727fe56b9e6SYuval Mintz 	return 0;
728fe56b9e6SYuval Mintz }
729fe56b9e6SYuval Mintz 
730fe56b9e6SYuval Mintz #define QED_HW_STOP_RETRY_LIMIT (10)
731fe56b9e6SYuval Mintz int qed_hw_stop(struct qed_dev *cdev)
732fe56b9e6SYuval Mintz {
733fe56b9e6SYuval Mintz 	int rc = 0, t_rc;
734fe56b9e6SYuval Mintz 	int i, j;
735fe56b9e6SYuval Mintz 
736fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, j) {
737fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
738fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
739fe56b9e6SYuval Mintz 
740fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
741fe56b9e6SYuval Mintz 
742fe56b9e6SYuval Mintz 		/* mark the hw as uninitialized... */
743fe56b9e6SYuval Mintz 		p_hwfn->hw_init_done = false;
744fe56b9e6SYuval Mintz 
745fe56b9e6SYuval Mintz 		rc = qed_sp_pf_stop(p_hwfn);
746fe56b9e6SYuval Mintz 		if (rc)
747fe56b9e6SYuval Mintz 			return rc;
748fe56b9e6SYuval Mintz 
749fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt,
750fe56b9e6SYuval Mintz 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
751fe56b9e6SYuval Mintz 
752fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
753fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
754fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
755fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
756fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
757fe56b9e6SYuval Mintz 
758fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
759fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
760fe56b9e6SYuval Mintz 		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
761fe56b9e6SYuval Mintz 			if ((!qed_rd(p_hwfn, p_ptt,
762fe56b9e6SYuval Mintz 				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
763fe56b9e6SYuval Mintz 			    (!qed_rd(p_hwfn, p_ptt,
764fe56b9e6SYuval Mintz 				     TM_REG_PF_SCAN_ACTIVE_TASK)))
765fe56b9e6SYuval Mintz 				break;
766fe56b9e6SYuval Mintz 
767fe56b9e6SYuval Mintz 			usleep_range(1000, 2000);
768fe56b9e6SYuval Mintz 		}
769fe56b9e6SYuval Mintz 		if (i == QED_HW_STOP_RETRY_LIMIT)
770fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn,
771fe56b9e6SYuval Mintz 				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
772fe56b9e6SYuval Mintz 				  (u8)qed_rd(p_hwfn, p_ptt,
773fe56b9e6SYuval Mintz 					     TM_REG_PF_SCAN_ACTIVE_CONN),
774fe56b9e6SYuval Mintz 				  (u8)qed_rd(p_hwfn, p_ptt,
775fe56b9e6SYuval Mintz 					     TM_REG_PF_SCAN_ACTIVE_TASK));
776fe56b9e6SYuval Mintz 
777fe56b9e6SYuval Mintz 		/* Disable Attention Generation */
778fe56b9e6SYuval Mintz 		qed_int_igu_disable_int(p_hwfn, p_ptt);
779fe56b9e6SYuval Mintz 
780fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
781fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
782fe56b9e6SYuval Mintz 
783fe56b9e6SYuval Mintz 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
784fe56b9e6SYuval Mintz 
785fe56b9e6SYuval Mintz 		/* Need to wait 1ms to guarantee SBs are cleared */
786fe56b9e6SYuval Mintz 		usleep_range(1000, 2000);
787fe56b9e6SYuval Mintz 	}
788fe56b9e6SYuval Mintz 
789fe56b9e6SYuval Mintz 	/* Disable DMAE in PXP - in CMT, this should only be done for
790fe56b9e6SYuval Mintz 	 * first hw-function, and only after all transactions have
791fe56b9e6SYuval Mintz 	 * stopped for all active hw-functions.
792fe56b9e6SYuval Mintz 	 */
793fe56b9e6SYuval Mintz 	t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
794fe56b9e6SYuval Mintz 				   cdev->hwfns[0].p_main_ptt,
795fe56b9e6SYuval Mintz 				   false);
796fe56b9e6SYuval Mintz 	if (t_rc != 0)
797fe56b9e6SYuval Mintz 		rc = t_rc;
798fe56b9e6SYuval Mintz 
799fe56b9e6SYuval Mintz 	return rc;
800fe56b9e6SYuval Mintz }
801fe56b9e6SYuval Mintz 
/* Close only the fastpath of every hw-function - gate Rx traffic, close
 * parser searches and drain the timers block - without tearing down
 * slowpath state, so traffic can later be re-opened via
 * qed_hw_start_fastpath().
 */
void qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int i, j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt   = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN,
			   "Shutting down the fastpath\n");

		/* Gate Rx traffic towards this PF */
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		/* Close parser per-protocol searches */
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		/* Disable the timers block, then poll (up to 10 x 1-2ms)
		 * until its linear scans of connections/tasks drain.
		 */
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
		qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
		for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
			if ((!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
			    (!qed_rd(p_hwfn, p_ptt,
				     TM_REG_PF_SCAN_ACTIVE_TASK)))
				break;

			usleep_range(1000, 2000);
		}
		/* Timeout is non-fatal here; only log it */
		if (i == QED_HW_STOP_RETRY_LIMIT)
			DP_NOTICE(p_hwfn,
				  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_CONN),
				  (u8)qed_rd(p_hwfn, p_ptt,
					     TM_REG_PF_SCAN_ACTIVE_TASK));

		/* NOTE(review): last argument is false here but true in
		 * qed_hw_stop() - presumably a lighter SB cleanup for a
		 * fastpath-only stop; confirm against qed_int.c.
		 */
		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
	}
}
848cee4d264SManish Chopra 
849cee4d264SManish Chopra void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
850cee4d264SManish Chopra {
851cee4d264SManish Chopra 	/* Re-open incoming traffic */
852cee4d264SManish Chopra 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
853cee4d264SManish Chopra 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
854cee4d264SManish Chopra }
855cee4d264SManish Chopra 
856fe56b9e6SYuval Mintz static int qed_reg_assert(struct qed_hwfn *hwfn,
857fe56b9e6SYuval Mintz 			  struct qed_ptt *ptt, u32 reg,
858fe56b9e6SYuval Mintz 			  bool expected)
859fe56b9e6SYuval Mintz {
860fe56b9e6SYuval Mintz 	u32 assert_val = qed_rd(hwfn, ptt, reg);
861fe56b9e6SYuval Mintz 
862fe56b9e6SYuval Mintz 	if (assert_val != expected) {
863fe56b9e6SYuval Mintz 		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
864fe56b9e6SYuval Mintz 			  reg, expected);
865fe56b9e6SYuval Mintz 		return -EINVAL;
866fe56b9e6SYuval Mintz 	}
867fe56b9e6SYuval Mintz 
868fe56b9e6SYuval Mintz 	return 0;
869fe56b9e6SYuval Mintz }
870fe56b9e6SYuval Mintz 
871fe56b9e6SYuval Mintz int qed_hw_reset(struct qed_dev *cdev)
872fe56b9e6SYuval Mintz {
873fe56b9e6SYuval Mintz 	int rc = 0;
874fe56b9e6SYuval Mintz 	u32 unload_resp, unload_param;
875fe56b9e6SYuval Mintz 	int i;
876fe56b9e6SYuval Mintz 
877fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
878fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
879fe56b9e6SYuval Mintz 
880fe56b9e6SYuval Mintz 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
881fe56b9e6SYuval Mintz 
882fe56b9e6SYuval Mintz 		/* Check for incorrect states */
883fe56b9e6SYuval Mintz 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
884fe56b9e6SYuval Mintz 			       QM_REG_USG_CNT_PF_TX, 0);
885fe56b9e6SYuval Mintz 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
886fe56b9e6SYuval Mintz 			       QM_REG_USG_CNT_PF_OTHER, 0);
887fe56b9e6SYuval Mintz 
888fe56b9e6SYuval Mintz 		/* Disable PF in HW blocks */
889fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
890fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
891fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
892fe56b9e6SYuval Mintz 		       TCFC_REG_STRONG_ENABLE_PF, 0);
893fe56b9e6SYuval Mintz 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
894fe56b9e6SYuval Mintz 		       CCFC_REG_STRONG_ENABLE_PF, 0);
895fe56b9e6SYuval Mintz 
896fe56b9e6SYuval Mintz 		/* Send unload command to MCP */
897fe56b9e6SYuval Mintz 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
898fe56b9e6SYuval Mintz 				 DRV_MSG_CODE_UNLOAD_REQ,
899fe56b9e6SYuval Mintz 				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
900fe56b9e6SYuval Mintz 				 &unload_resp, &unload_param);
901fe56b9e6SYuval Mintz 		if (rc) {
902fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
903fe56b9e6SYuval Mintz 			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
904fe56b9e6SYuval Mintz 		}
905fe56b9e6SYuval Mintz 
906fe56b9e6SYuval Mintz 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
907fe56b9e6SYuval Mintz 				 DRV_MSG_CODE_UNLOAD_DONE,
908fe56b9e6SYuval Mintz 				 0, &unload_resp, &unload_param);
909fe56b9e6SYuval Mintz 		if (rc) {
910fe56b9e6SYuval Mintz 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
911fe56b9e6SYuval Mintz 			return rc;
912fe56b9e6SYuval Mintz 		}
913fe56b9e6SYuval Mintz 	}
914fe56b9e6SYuval Mintz 
915fe56b9e6SYuval Mintz 	return rc;
916fe56b9e6SYuval Mintz }
917fe56b9e6SYuval Mintz 
/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	/* Release the PTT pool allocated by qed_ptt_pool_alloc() */
	qed_ptt_pool_free(p_hwfn);
	/* kfree(NULL) is a no-op, so this is safe even if the IGU info
	 * was never allocated.
	 */
	kfree(p_hwfn->hw_info.p_igu_info);
}
924fe56b9e6SYuval Mintz 
925fe56b9e6SYuval Mintz /* Setup bar access */
926fe56b9e6SYuval Mintz static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
927fe56b9e6SYuval Mintz {
928fe56b9e6SYuval Mintz 	int rc;
929fe56b9e6SYuval Mintz 
930fe56b9e6SYuval Mintz 	/* Allocate PTT pool */
931fe56b9e6SYuval Mintz 	rc = qed_ptt_pool_alloc(p_hwfn);
932fe56b9e6SYuval Mintz 	if (rc)
933fe56b9e6SYuval Mintz 		return rc;
934fe56b9e6SYuval Mintz 
935fe56b9e6SYuval Mintz 	/* Allocate the main PTT */
936fe56b9e6SYuval Mintz 	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
937fe56b9e6SYuval Mintz 
938fe56b9e6SYuval Mintz 	/* clear indirect access */
939fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
940fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
941fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
942fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
943fe56b9e6SYuval Mintz 
944fe56b9e6SYuval Mintz 	/* Clean Previous errors if such exist */
945fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
946fe56b9e6SYuval Mintz 	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
947fe56b9e6SYuval Mintz 	       1 << p_hwfn->abs_pf_id);
948fe56b9e6SYuval Mintz 
949fe56b9e6SYuval Mintz 	/* enable internal target-read */
950fe56b9e6SYuval Mintz 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
951fe56b9e6SYuval Mintz 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
952fe56b9e6SYuval Mintz 
953fe56b9e6SYuval Mintz 	return 0;
954fe56b9e6SYuval Mintz }
955fe56b9e6SYuval Mintz 
956fe56b9e6SYuval Mintz static void get_function_id(struct qed_hwfn *p_hwfn)
957fe56b9e6SYuval Mintz {
958fe56b9e6SYuval Mintz 	/* ME Register */
959fe56b9e6SYuval Mintz 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
960fe56b9e6SYuval Mintz 
961fe56b9e6SYuval Mintz 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
962fe56b9e6SYuval Mintz 
963fe56b9e6SYuval Mintz 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
964fe56b9e6SYuval Mintz 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
965fe56b9e6SYuval Mintz 				      PXP_CONCRETE_FID_PFID);
966fe56b9e6SYuval Mintz 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
967fe56b9e6SYuval Mintz 				    PXP_CONCRETE_FID_PORT);
968fe56b9e6SYuval Mintz }
969fe56b9e6SYuval Mintz 
97025c089d7SYuval Mintz static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
97125c089d7SYuval Mintz {
97225c089d7SYuval Mintz 	u32 *feat_num = p_hwfn->hw_info.feat_num;
97325c089d7SYuval Mintz 	int num_features = 1;
97425c089d7SYuval Mintz 
97525c089d7SYuval Mintz 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
97625c089d7SYuval Mintz 						num_features,
97725c089d7SYuval Mintz 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
97825c089d7SYuval Mintz 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
97925c089d7SYuval Mintz 		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
98025c089d7SYuval Mintz 		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
98125c089d7SYuval Mintz 		   num_features);
98225c089d7SYuval Mintz }
98325c089d7SYuval Mintz 
/* Partition the engine's HW resources evenly across the functions
 * sharing it, and record each resource's count and starting index
 * for this PF.
 */
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
	u32 *resc_start = p_hwfn->hw_info.resc_start;
	u32 *resc_num = p_hwfn->hw_info.resc_num;
	int num_funcs, i;

	/* In MF split across all possible PFs, otherwise only across
	 * the engine's physical ports.
	 */
	num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB
				  : p_hwfn->cdev->num_ports_in_engines;

	/* SBs: equal share, but never more than the IGU actually provides */
	resc_num[QED_SB] = min_t(u32,
				 (MAX_SB_PER_PATH_BB / num_funcs),
				 qed_int_get_num_sbs(p_hwfn, NULL));
	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
	/* NOTE(review): 8 rate limiters and 950 ILT lines per PF are
	 * magic constants - presumably HW/FW budget choices; confirm.
	 */
	resc_num[QED_RL] = 8;
	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
			     num_funcs;
	resc_num[QED_ILT] = 950;

	/* Each PF's slice starts right after the previous PF's slice */
	for (i = 0; i < QED_MAX_RESC; i++)
		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;

	qed_hw_set_feat(p_hwfn);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "The numbers for each resource are:\n"
		   "SB = %d start = %d\n"
		   "L2_QUEUE = %d start = %d\n"
		   "VPORT = %d start = %d\n"
		   "PQ = %d start = %d\n"
		   "RL = %d start = %d\n"
		   "MAC = %d start = %d\n"
		   "VLAN = %d start = %d\n"
		   "ILT = %d start = %d\n",
		   p_hwfn->hw_info.resc_num[QED_SB],
		   p_hwfn->hw_info.resc_start[QED_SB],
		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
		   p_hwfn->hw_info.resc_num[QED_VPORT],
		   p_hwfn->hw_info.resc_start[QED_VPORT],
		   p_hwfn->hw_info.resc_num[QED_PQ],
		   p_hwfn->hw_info.resc_start[QED_PQ],
		   p_hwfn->hw_info.resc_num[QED_RL],
		   p_hwfn->hw_info.resc_start[QED_RL],
		   p_hwfn->hw_info.resc_num[QED_MAC],
		   p_hwfn->hw_info.resc_start[QED_MAC],
		   p_hwfn->hw_info.resc_num[QED_VLAN],
		   p_hwfn->hw_info.resc_start[QED_VLAN],
		   p_hwfn->hw_info.resc_num[QED_ILT],
		   p_hwfn->hw_info.resc_start[QED_ILT]);
}
1038fe56b9e6SYuval Mintz 
/* Parse the MFW's shared-memory (nvram) configuration: pci ids, port
 * mode, default link parameters and the multi-function mode. Returns
 * 0 on success, -EINVAL if the MFW never initialized shared memory,
 * or the result of qed_mcp_fill_shmem_func_info().
 */
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt)
{
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	u32 port_cfg_addr, link_temp, val, nvm_cfg_addr;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1  (Notice this is just offset, and not offsize (TBD) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	/* Read Vendor Id / Device Id */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, pci_id);
	p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) &
				    NVM_CFG1_GLOB_VENDOR_ID_MASK;

	/* Translate the nvram port-mode encoding to the driver's enum */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	default:
		/* Non-fatal: port_mode is simply left unset here */
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
			  core_cfg);
		break;
	}

	/* Device id lives in a per-function section; the field used
	 * depends on whether we run multi-function.
	 */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) +
	       offsetof(struct nvm_cfg1_func, device_id);
	val = qed_rd(p_hwfn, p_ptt, addr);

	if (IS_MF(p_hwfn)) {
		p_hwfn->hw_info.device_id =
			(val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >>
			NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET;
	} else {
		p_hwfn->hw_info.device_id =
			(val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >>
			NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link->speed.advertised_speeds =
		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;

	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
						link->speed.advertised_speeds;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
			  link_temp);
	}

	/* Flow-control defaults come from the same link_settings word */
	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
		   link->speed.forced_speed, link->speed.advertised_speeds,
		   link->speed.autoneg, link->pause.autoneg);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	/* NOTE(review): no default case - an unrecognized mf_mode leaves
	 * cdev->mf_mode unchanged; confirm that is intentional.
	 */
	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_FORCED_SF:
		p_hwfn->cdev->mf_mode = SF;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
1206fe56b9e6SYuval Mintz 
1207fe56b9e6SYuval Mintz static int
1208fe56b9e6SYuval Mintz qed_get_hw_info(struct qed_hwfn *p_hwfn,
1209fe56b9e6SYuval Mintz 		struct qed_ptt *p_ptt,
1210fe56b9e6SYuval Mintz 		enum qed_pci_personality personality)
1211fe56b9e6SYuval Mintz {
1212fe56b9e6SYuval Mintz 	u32 port_mode;
1213fe56b9e6SYuval Mintz 	int rc;
1214fe56b9e6SYuval Mintz 
1215fe56b9e6SYuval Mintz 	/* Read the port mode */
1216fe56b9e6SYuval Mintz 	port_mode = qed_rd(p_hwfn, p_ptt,
1217fe56b9e6SYuval Mintz 			   CNIG_REG_NW_PORT_MODE_BB_B0);
1218fe56b9e6SYuval Mintz 
1219fe56b9e6SYuval Mintz 	if (port_mode < 3) {
1220fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1221fe56b9e6SYuval Mintz 	} else if (port_mode <= 5) {
1222fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 2;
1223fe56b9e6SYuval Mintz 	} else {
1224fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
1225fe56b9e6SYuval Mintz 			  p_hwfn->cdev->num_ports_in_engines);
1226fe56b9e6SYuval Mintz 
1227fe56b9e6SYuval Mintz 		/* Default num_ports_in_engines to something */
1228fe56b9e6SYuval Mintz 		p_hwfn->cdev->num_ports_in_engines = 1;
1229fe56b9e6SYuval Mintz 	}
1230fe56b9e6SYuval Mintz 
1231fe56b9e6SYuval Mintz 	qed_hw_get_nvm_info(p_hwfn, p_ptt);
1232fe56b9e6SYuval Mintz 
1233fe56b9e6SYuval Mintz 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1234fe56b9e6SYuval Mintz 	if (rc)
1235fe56b9e6SYuval Mintz 		return rc;
1236fe56b9e6SYuval Mintz 
1237fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn))
1238fe56b9e6SYuval Mintz 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1239fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.mac);
1240fe56b9e6SYuval Mintz 	else
1241fe56b9e6SYuval Mintz 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1242fe56b9e6SYuval Mintz 
1243fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1244fe56b9e6SYuval Mintz 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1245fe56b9e6SYuval Mintz 			p_hwfn->hw_info.ovlan =
1246fe56b9e6SYuval Mintz 				p_hwfn->mcp_info->func_info.ovlan;
1247fe56b9e6SYuval Mintz 
1248fe56b9e6SYuval Mintz 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1249fe56b9e6SYuval Mintz 	}
1250fe56b9e6SYuval Mintz 
1251fe56b9e6SYuval Mintz 	if (qed_mcp_is_init(p_hwfn)) {
1252fe56b9e6SYuval Mintz 		enum qed_pci_personality protocol;
1253fe56b9e6SYuval Mintz 
1254fe56b9e6SYuval Mintz 		protocol = p_hwfn->mcp_info->func_info.protocol;
1255fe56b9e6SYuval Mintz 		p_hwfn->hw_info.personality = protocol;
1256fe56b9e6SYuval Mintz 	}
1257fe56b9e6SYuval Mintz 
1258fe56b9e6SYuval Mintz 	qed_hw_get_resc(p_hwfn);
1259fe56b9e6SYuval Mintz 
1260fe56b9e6SYuval Mintz 	return rc;
1261fe56b9e6SYuval Mintz }
1262fe56b9e6SYuval Mintz 
1263fe56b9e6SYuval Mintz static void qed_get_dev_info(struct qed_dev *cdev)
1264fe56b9e6SYuval Mintz {
1265fe56b9e6SYuval Mintz 	u32 tmp;
1266fe56b9e6SYuval Mintz 
1267fe56b9e6SYuval Mintz 	cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1268fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_NUM);
1269fe56b9e6SYuval Mintz 	cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1270fe56b9e6SYuval Mintz 				     MISCS_REG_CHIP_REV);
1271fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
1272fe56b9e6SYuval Mintz 
1273fe56b9e6SYuval Mintz 	/* Learn number of HW-functions */
1274fe56b9e6SYuval Mintz 	tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1275fe56b9e6SYuval Mintz 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
1276fe56b9e6SYuval Mintz 
1277fe56b9e6SYuval Mintz 	if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) {
1278fe56b9e6SYuval Mintz 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1279fe56b9e6SYuval Mintz 		cdev->num_hwfns = 2;
1280fe56b9e6SYuval Mintz 	} else {
1281fe56b9e6SYuval Mintz 		cdev->num_hwfns = 1;
1282fe56b9e6SYuval Mintz 	}
1283fe56b9e6SYuval Mintz 
1284fe56b9e6SYuval Mintz 	cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1285fe56b9e6SYuval Mintz 				    MISCS_REG_CHIP_TEST_REG) >> 4;
1286fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1287fe56b9e6SYuval Mintz 	cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt,
1288fe56b9e6SYuval Mintz 				       MISCS_REG_CHIP_METAL);
1289fe56b9e6SYuval Mintz 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1290fe56b9e6SYuval Mintz 
1291fe56b9e6SYuval Mintz 	DP_INFO(cdev->hwfns,
1292fe56b9e6SYuval Mintz 		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1293fe56b9e6SYuval Mintz 		cdev->chip_num, cdev->chip_rev,
1294fe56b9e6SYuval Mintz 		cdev->chip_bond_id, cdev->chip_metal);
1295fe56b9e6SYuval Mintz }
1296fe56b9e6SYuval Mintz 
1297fe56b9e6SYuval Mintz static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1298fe56b9e6SYuval Mintz 				 void __iomem *p_regview,
1299fe56b9e6SYuval Mintz 				 void __iomem *p_doorbells,
1300fe56b9e6SYuval Mintz 				 enum qed_pci_personality personality)
1301fe56b9e6SYuval Mintz {
1302fe56b9e6SYuval Mintz 	int rc = 0;
1303fe56b9e6SYuval Mintz 
1304fe56b9e6SYuval Mintz 	/* Split PCI bars evenly between hwfns */
1305fe56b9e6SYuval Mintz 	p_hwfn->regview = p_regview;
1306fe56b9e6SYuval Mintz 	p_hwfn->doorbells = p_doorbells;
1307fe56b9e6SYuval Mintz 
1308fe56b9e6SYuval Mintz 	/* Validate that chip access is feasible */
1309fe56b9e6SYuval Mintz 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1310fe56b9e6SYuval Mintz 		DP_ERR(p_hwfn,
1311fe56b9e6SYuval Mintz 		       "Reading the ME register returns all Fs; Preventing further chip access\n");
1312fe56b9e6SYuval Mintz 		return -EINVAL;
1313fe56b9e6SYuval Mintz 	}
1314fe56b9e6SYuval Mintz 
1315fe56b9e6SYuval Mintz 	get_function_id(p_hwfn);
1316fe56b9e6SYuval Mintz 
1317fe56b9e6SYuval Mintz 	rc = qed_hw_hwfn_prepare(p_hwfn);
1318fe56b9e6SYuval Mintz 	if (rc) {
1319fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1320fe56b9e6SYuval Mintz 		goto err0;
1321fe56b9e6SYuval Mintz 	}
1322fe56b9e6SYuval Mintz 
1323fe56b9e6SYuval Mintz 	/* First hwfn learns basic information, e.g., number of hwfns */
1324fe56b9e6SYuval Mintz 	if (!p_hwfn->my_id)
1325fe56b9e6SYuval Mintz 		qed_get_dev_info(p_hwfn->cdev);
1326fe56b9e6SYuval Mintz 
1327fe56b9e6SYuval Mintz 	/* Initialize MCP structure */
1328fe56b9e6SYuval Mintz 	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1329fe56b9e6SYuval Mintz 	if (rc) {
1330fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1331fe56b9e6SYuval Mintz 		goto err1;
1332fe56b9e6SYuval Mintz 	}
1333fe56b9e6SYuval Mintz 
1334fe56b9e6SYuval Mintz 	/* Read the device configuration information from the HW and SHMEM */
1335fe56b9e6SYuval Mintz 	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1336fe56b9e6SYuval Mintz 	if (rc) {
1337fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1338fe56b9e6SYuval Mintz 		goto err2;
1339fe56b9e6SYuval Mintz 	}
1340fe56b9e6SYuval Mintz 
1341fe56b9e6SYuval Mintz 	/* Allocate the init RT array and initialize the init-ops engine */
1342fe56b9e6SYuval Mintz 	rc = qed_init_alloc(p_hwfn);
1343fe56b9e6SYuval Mintz 	if (rc) {
1344fe56b9e6SYuval Mintz 		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1345fe56b9e6SYuval Mintz 		goto err2;
1346fe56b9e6SYuval Mintz 	}
1347fe56b9e6SYuval Mintz 
1348fe56b9e6SYuval Mintz 	return rc;
1349fe56b9e6SYuval Mintz err2:
1350fe56b9e6SYuval Mintz 	qed_mcp_free(p_hwfn);
1351fe56b9e6SYuval Mintz err1:
1352fe56b9e6SYuval Mintz 	qed_hw_hwfn_free(p_hwfn);
1353fe56b9e6SYuval Mintz err0:
1354fe56b9e6SYuval Mintz 	return rc;
1355fe56b9e6SYuval Mintz }
1356fe56b9e6SYuval Mintz 
1357fe56b9e6SYuval Mintz static u32 qed_hw_bar_size(struct qed_dev *cdev,
1358fe56b9e6SYuval Mintz 			   u8 bar_id)
1359fe56b9e6SYuval Mintz {
1360fe56b9e6SYuval Mintz 	u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
1361fe56b9e6SYuval Mintz 
1362fe56b9e6SYuval Mintz 	return size / cdev->num_hwfns;
1363fe56b9e6SYuval Mintz }
1364fe56b9e6SYuval Mintz 
1365fe56b9e6SYuval Mintz int qed_hw_prepare(struct qed_dev *cdev,
1366fe56b9e6SYuval Mintz 		   int personality)
1367fe56b9e6SYuval Mintz {
1368fe56b9e6SYuval Mintz 	int rc, i;
1369fe56b9e6SYuval Mintz 
1370fe56b9e6SYuval Mintz 	/* Store the precompiled init data ptrs */
1371fe56b9e6SYuval Mintz 	qed_init_iro_array(cdev);
1372fe56b9e6SYuval Mintz 
1373fe56b9e6SYuval Mintz 	/* Initialize the first hwfn - will learn number of hwfns */
1374fe56b9e6SYuval Mintz 	rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
1375fe56b9e6SYuval Mintz 				   cdev->doorbells, personality);
1376fe56b9e6SYuval Mintz 	if (rc)
1377fe56b9e6SYuval Mintz 		return rc;
1378fe56b9e6SYuval Mintz 
1379fe56b9e6SYuval Mintz 	personality = cdev->hwfns[0].hw_info.personality;
1380fe56b9e6SYuval Mintz 
1381fe56b9e6SYuval Mintz 	/* Initialize the rest of the hwfns */
1382fe56b9e6SYuval Mintz 	for (i = 1; i < cdev->num_hwfns; i++) {
1383fe56b9e6SYuval Mintz 		void __iomem *p_regview, *p_doorbell;
1384fe56b9e6SYuval Mintz 
1385fe56b9e6SYuval Mintz 		p_regview =  cdev->regview +
1386fe56b9e6SYuval Mintz 			     i * qed_hw_bar_size(cdev, 0);
1387fe56b9e6SYuval Mintz 		p_doorbell = cdev->doorbells +
1388fe56b9e6SYuval Mintz 			     i * qed_hw_bar_size(cdev, 1);
1389fe56b9e6SYuval Mintz 		rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
1390fe56b9e6SYuval Mintz 					   p_doorbell, personality);
1391fe56b9e6SYuval Mintz 		if (rc) {
1392fe56b9e6SYuval Mintz 			/* Cleanup previously initialized hwfns */
1393fe56b9e6SYuval Mintz 			while (--i >= 0) {
1394fe56b9e6SYuval Mintz 				qed_init_free(&cdev->hwfns[i]);
1395fe56b9e6SYuval Mintz 				qed_mcp_free(&cdev->hwfns[i]);
1396fe56b9e6SYuval Mintz 				qed_hw_hwfn_free(&cdev->hwfns[i]);
1397fe56b9e6SYuval Mintz 			}
1398fe56b9e6SYuval Mintz 			return rc;
1399fe56b9e6SYuval Mintz 		}
1400fe56b9e6SYuval Mintz 	}
1401fe56b9e6SYuval Mintz 
1402fe56b9e6SYuval Mintz 	return 0;
1403fe56b9e6SYuval Mintz }
1404fe56b9e6SYuval Mintz 
1405fe56b9e6SYuval Mintz void qed_hw_remove(struct qed_dev *cdev)
1406fe56b9e6SYuval Mintz {
1407fe56b9e6SYuval Mintz 	int i;
1408fe56b9e6SYuval Mintz 
1409fe56b9e6SYuval Mintz 	for_each_hwfn(cdev, i) {
1410fe56b9e6SYuval Mintz 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1411fe56b9e6SYuval Mintz 
1412fe56b9e6SYuval Mintz 		qed_init_free(p_hwfn);
1413fe56b9e6SYuval Mintz 		qed_hw_hwfn_free(p_hwfn);
1414fe56b9e6SYuval Mintz 		qed_mcp_free(p_hwfn);
1415fe56b9e6SYuval Mintz 	}
1416fe56b9e6SYuval Mintz }
1417fe56b9e6SYuval Mintz 
1418fe56b9e6SYuval Mintz int qed_chain_alloc(struct qed_dev *cdev,
1419fe56b9e6SYuval Mintz 		    enum qed_chain_use_mode intended_use,
1420fe56b9e6SYuval Mintz 		    enum qed_chain_mode mode,
1421fe56b9e6SYuval Mintz 		    u16 num_elems,
1422fe56b9e6SYuval Mintz 		    size_t elem_size,
1423fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1424fe56b9e6SYuval Mintz {
1425fe56b9e6SYuval Mintz 	dma_addr_t p_pbl_phys = 0;
1426fe56b9e6SYuval Mintz 	void *p_pbl_virt = NULL;
1427fe56b9e6SYuval Mintz 	dma_addr_t p_phys = 0;
1428fe56b9e6SYuval Mintz 	void *p_virt = NULL;
1429fe56b9e6SYuval Mintz 	u16 page_cnt = 0;
1430fe56b9e6SYuval Mintz 	size_t size;
1431fe56b9e6SYuval Mintz 
1432fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_SINGLE)
1433fe56b9e6SYuval Mintz 		page_cnt = 1;
1434fe56b9e6SYuval Mintz 	else
1435fe56b9e6SYuval Mintz 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1436fe56b9e6SYuval Mintz 
1437fe56b9e6SYuval Mintz 	size = page_cnt * QED_CHAIN_PAGE_SIZE;
1438fe56b9e6SYuval Mintz 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1439fe56b9e6SYuval Mintz 				    size, &p_phys, GFP_KERNEL);
1440fe56b9e6SYuval Mintz 	if (!p_virt) {
1441fe56b9e6SYuval Mintz 		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1442fe56b9e6SYuval Mintz 		goto nomem;
1443fe56b9e6SYuval Mintz 	}
1444fe56b9e6SYuval Mintz 
1445fe56b9e6SYuval Mintz 	if (mode == QED_CHAIN_MODE_PBL) {
1446fe56b9e6SYuval Mintz 		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1447fe56b9e6SYuval Mintz 		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1448fe56b9e6SYuval Mintz 						size, &p_pbl_phys,
1449fe56b9e6SYuval Mintz 						GFP_KERNEL);
1450fe56b9e6SYuval Mintz 		if (!p_pbl_virt) {
1451fe56b9e6SYuval Mintz 			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1452fe56b9e6SYuval Mintz 			goto nomem;
1453fe56b9e6SYuval Mintz 		}
1454fe56b9e6SYuval Mintz 
1455fe56b9e6SYuval Mintz 		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1456fe56b9e6SYuval Mintz 				   (u8)elem_size, intended_use,
1457fe56b9e6SYuval Mintz 				   p_pbl_phys, p_pbl_virt);
1458fe56b9e6SYuval Mintz 	} else {
1459fe56b9e6SYuval Mintz 		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1460fe56b9e6SYuval Mintz 			       (u8)elem_size, intended_use, mode);
1461fe56b9e6SYuval Mintz 	}
1462fe56b9e6SYuval Mintz 
1463fe56b9e6SYuval Mintz 	return 0;
1464fe56b9e6SYuval Mintz 
1465fe56b9e6SYuval Mintz nomem:
1466fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
1467fe56b9e6SYuval Mintz 			  page_cnt * QED_CHAIN_PAGE_SIZE,
1468fe56b9e6SYuval Mintz 			  p_virt, p_phys);
1469fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev,
1470fe56b9e6SYuval Mintz 			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1471fe56b9e6SYuval Mintz 			  p_pbl_virt, p_pbl_phys);
1472fe56b9e6SYuval Mintz 
1473fe56b9e6SYuval Mintz 	return -ENOMEM;
1474fe56b9e6SYuval Mintz }
1475fe56b9e6SYuval Mintz 
1476fe56b9e6SYuval Mintz void qed_chain_free(struct qed_dev *cdev,
1477fe56b9e6SYuval Mintz 		    struct qed_chain *p_chain)
1478fe56b9e6SYuval Mintz {
1479fe56b9e6SYuval Mintz 	size_t size;
1480fe56b9e6SYuval Mintz 
1481fe56b9e6SYuval Mintz 	if (!p_chain->p_virt_addr)
1482fe56b9e6SYuval Mintz 		return;
1483fe56b9e6SYuval Mintz 
1484fe56b9e6SYuval Mintz 	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1485fe56b9e6SYuval Mintz 		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1486fe56b9e6SYuval Mintz 		dma_free_coherent(&cdev->pdev->dev, size,
1487fe56b9e6SYuval Mintz 				  p_chain->pbl.p_virt_table,
1488fe56b9e6SYuval Mintz 				  p_chain->pbl.p_phys_table);
1489fe56b9e6SYuval Mintz 	}
1490fe56b9e6SYuval Mintz 
1491fe56b9e6SYuval Mintz 	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1492fe56b9e6SYuval Mintz 	dma_free_coherent(&cdev->pdev->dev, size,
1493fe56b9e6SYuval Mintz 			  p_chain->p_virt_addr,
1494fe56b9e6SYuval Mintz 			  p_chain->p_phys_addr);
1495fe56b9e6SYuval Mintz }
1496cee4d264SManish Chopra 
1497cee4d264SManish Chopra int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1498cee4d264SManish Chopra 		    u16 src_id, u16 *dst_id)
1499cee4d264SManish Chopra {
1500cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1501cee4d264SManish Chopra 		u16 min, max;
1502cee4d264SManish Chopra 
1503cee4d264SManish Chopra 		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1504cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1505cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1506cee4d264SManish Chopra 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1507cee4d264SManish Chopra 			  src_id, min, max);
1508cee4d264SManish Chopra 
1509cee4d264SManish Chopra 		return -EINVAL;
1510cee4d264SManish Chopra 	}
1511cee4d264SManish Chopra 
1512cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1513cee4d264SManish Chopra 
1514cee4d264SManish Chopra 	return 0;
1515cee4d264SManish Chopra }
1516cee4d264SManish Chopra 
1517cee4d264SManish Chopra int qed_fw_vport(struct qed_hwfn *p_hwfn,
1518cee4d264SManish Chopra 		 u8 src_id, u8 *dst_id)
1519cee4d264SManish Chopra {
1520cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1521cee4d264SManish Chopra 		u8 min, max;
1522cee4d264SManish Chopra 
1523cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
1524cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
1525cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1526cee4d264SManish Chopra 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
1527cee4d264SManish Chopra 			  src_id, min, max);
1528cee4d264SManish Chopra 
1529cee4d264SManish Chopra 		return -EINVAL;
1530cee4d264SManish Chopra 	}
1531cee4d264SManish Chopra 
1532cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1533cee4d264SManish Chopra 
1534cee4d264SManish Chopra 	return 0;
1535cee4d264SManish Chopra }
1536cee4d264SManish Chopra 
1537cee4d264SManish Chopra int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1538cee4d264SManish Chopra 		   u8 src_id, u8 *dst_id)
1539cee4d264SManish Chopra {
1540cee4d264SManish Chopra 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1541cee4d264SManish Chopra 		u8 min, max;
1542cee4d264SManish Chopra 
1543cee4d264SManish Chopra 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1544cee4d264SManish Chopra 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1545cee4d264SManish Chopra 		DP_NOTICE(p_hwfn,
1546cee4d264SManish Chopra 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1547cee4d264SManish Chopra 			  src_id, min, max);
1548cee4d264SManish Chopra 
1549cee4d264SManish Chopra 		return -EINVAL;
1550cee4d264SManish Chopra 	}
1551cee4d264SManish Chopra 
1552cee4d264SManish Chopra 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1553cee4d264SManish Chopra 
1554cee4d264SManish Chopra 	return 0;
1555cee4d264SManish Chopra }
1556