1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8 
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <linux/io.h>
12 #include <linux/delay.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/errno.h>
15 #include <linux/kernel.h>
16 #include <linux/mutex.h>
17 #include <linux/pci.h>
18 #include <linux/slab.h>
19 #include <linux/string.h>
20 #include <linux/etherdevice.h>
21 #include <linux/qed/qed_chain.h>
22 #include <linux/qed/qed_if.h>
23 #include "qed.h"
24 #include "qed_cxt.h"
25 #include "qed_dev_api.h"
26 #include "qed_hsi.h"
27 #include "qed_hw.h"
28 #include "qed_init_ops.h"
29 #include "qed_int.h"
30 #include "qed_mcp.h"
31 #include "qed_reg_addr.h"
32 #include "qed_sp.h"
33 
34 /* API common to all protocols */
35 enum BAR_ID {
	BAR_ID_0,       /* Used for GRC */
	BAR_ID_1        /* Used for doorbells */
38 };
39 
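/* Return the size in bytes of the PCI BAR selected by @bar_id, as read from
 * the per-PF PGLUE_B BAR-size register. A non-zero register value val is
 * interpreted as a size of 1 << (val + 15) bytes (64kB, 128kB, ...). If the
 * register reads zero - older MFW images program it only conditionally -
 * conservative defaults are assumed instead.
 */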
40 static u32 qed_hw_bar_size(struct qed_hwfn	*p_hwfn,
41 			   enum BAR_ID		bar_id)
42 {
43 	u32	bar_reg = (bar_id == BAR_ID_0 ?
44 			   PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
45 	u32	val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
46 
47 	if (val)
48 		return 1 << (val + 15);
49 
	/* Old MFW initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
60 }
61 
62 void qed_init_dp(struct qed_dev *cdev,
63 		 u32 dp_module, u8 dp_level)
64 {
65 	u32 i;
66 
67 	cdev->dp_level = dp_level;
68 	cdev->dp_module = dp_module;
69 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
70 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
71 
72 		p_hwfn->dp_level = dp_level;
73 		p_hwfn->dp_module = dp_module;
74 	}
75 }
76 
77 void qed_init_struct(struct qed_dev *cdev)
78 {
79 	u8 i;
80 
81 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
82 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
83 
84 		p_hwfn->cdev = cdev;
85 		p_hwfn->my_id = i;
86 		p_hwfn->b_active = false;
87 
88 		mutex_init(&p_hwfn->dmae_info.mutex);
89 	}
90 
91 	/* hwfn 0 is always active */
92 	cdev->hwfns[0].b_active = true;
93 
94 	/* set the default cache alignment to 128 */
95 	cdev->cache_shift = 7;
96 }
97 
98 static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
99 {
100 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
101 
102 	kfree(qm_info->qm_pq_params);
103 	qm_info->qm_pq_params = NULL;
104 	kfree(qm_info->qm_vport_params);
105 	qm_info->qm_vport_params = NULL;
106 	kfree(qm_info->qm_port_params);
107 	qm_info->qm_port_params = NULL;
108 }
109 
110 void qed_resc_free(struct qed_dev *cdev)
111 {
112 	int i;
113 
114 	kfree(cdev->fw_data);
115 	cdev->fw_data = NULL;
116 
117 	kfree(cdev->reset_stats);
118 
119 	for_each_hwfn(cdev, i) {
120 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
121 
122 		kfree(p_hwfn->p_tx_cids);
123 		p_hwfn->p_tx_cids = NULL;
124 		kfree(p_hwfn->p_rx_cids);
125 		p_hwfn->p_rx_cids = NULL;
126 	}
127 
128 	for_each_hwfn(cdev, i) {
129 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
130 
131 		qed_cxt_mngr_free(p_hwfn);
132 		qed_qm_info_free(p_hwfn);
133 		qed_spq_free(p_hwfn);
134 		qed_eq_free(p_hwfn, p_hwfn->p_eq);
135 		qed_consq_free(p_hwfn, p_hwfn->p_consq);
136 		qed_int_free(p_hwfn);
137 		qed_dmae_info_free(p_hwfn);
138 	}
139 }
140 
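/* Derive the queue-manager (QM) configuration for this HW-function: one
 * physical queue (PQ) per traffic class in use (a single non-offload TC
 * here) plus one pure loopback PQ, the per-vport parameters, and an even
 * split of the PBF command lines and BTB blocks between the ports of the
 * engine. The arrays allocated here are released by qed_qm_info_free().
 */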
141 static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
142 {
143 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
144 	struct init_qm_port_params *p_qm_port;
145 	u8 num_vports, i, vport_id, num_ports;
146 	u16 num_pqs, multi_cos_tcs = 1;
147 
148 	memset(qm_info, 0, sizeof(*qm_info));
149 
150 	num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
151 	num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
152 
	/* Sanity-check that the setup requires a legal number of resources */
154 	if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
155 		DP_ERR(p_hwfn,
		       "Need too many Physical queues - 0x%04x when only 0x%04x are available\n",
157 		       num_pqs, RESC_NUM(p_hwfn, QED_PQ));
158 		return -EINVAL;
159 	}
160 
	/* PQs will be arranged as follows: first the per-TC PQs, then the
	 * pure-LB queue.
	 */
163 	qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
164 					num_pqs, GFP_KERNEL);
165 	if (!qm_info->qm_pq_params)
166 		goto alloc_err;
167 
168 	qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
169 					   num_vports, GFP_KERNEL);
170 	if (!qm_info->qm_vport_params)
171 		goto alloc_err;
172 
173 	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
174 					  MAX_NUM_PORTS, GFP_KERNEL);
175 	if (!qm_info->qm_port_params)
176 		goto alloc_err;
177 
178 	vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
179 
180 	/* First init per-TC PQs */
181 	for (i = 0; i < multi_cos_tcs; i++) {
182 		struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
183 
184 		params->vport_id = vport_id;
185 		params->tc_id = p_hwfn->hw_info.non_offload_tc;
186 		params->wrr_group = 1;
187 	}
188 
189 	/* Then init pure-LB PQ */
190 	qm_info->pure_lb_pq = i;
191 	qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
192 	qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
193 	qm_info->qm_pq_params[i].wrr_group = 1;
194 	i++;
195 
196 	qm_info->offload_pq = 0;
197 	qm_info->num_pqs = num_pqs;
198 	qm_info->num_vports = num_vports;
199 
200 	/* Initialize qm port parameters */
201 	num_ports = p_hwfn->cdev->num_ports_in_engines;
202 	for (i = 0; i < num_ports; i++) {
203 		p_qm_port = &qm_info->qm_port_params[i];
204 		p_qm_port->active = 1;
205 		p_qm_port->num_active_phys_tcs = 4;
206 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
207 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
208 	}
209 
210 	qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
211 
212 	qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
213 
214 	qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
215 
216 	qm_info->pf_wfq = 0;
217 	qm_info->pf_rl = 0;
218 	qm_info->vport_rl_en = 1;
219 
220 	return 0;
221 
222 alloc_err:
223 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
224 	kfree(qm_info->qm_pq_params);
225 	kfree(qm_info->qm_vport_params);
226 	kfree(qm_info->qm_port_params);
227 
228 	return -ENOMEM;
229 }
230 
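/* Allocate all per-device and per-hwfn resources needed before HW init.
 * The ordering matters: the context manager and its PF parameters must
 * exist before the QM info and the ILT layout can be computed, and the ILT
 * shadow must be in place before the slow-path queue (SPQ) is allocated,
 * since SPQ allocation initializes the SPQ context. Any failure unwinds
 * through qed_resc_free().
 */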
231 int qed_resc_alloc(struct qed_dev *cdev)
232 {
233 	struct qed_consq *p_consq;
234 	struct qed_eq *p_eq;
235 	int i, rc = 0;
236 
237 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
238 	if (!cdev->fw_data)
239 		return -ENOMEM;
240 
241 	/* Allocate Memory for the Queue->CID mapping */
242 	for_each_hwfn(cdev, i) {
243 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
244 		int tx_size = sizeof(struct qed_hw_cid_data) *
245 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
246 		int rx_size = sizeof(struct qed_hw_cid_data) *
247 				     RESC_NUM(p_hwfn, QED_L2_QUEUE);
248 
249 		p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
250 		if (!p_hwfn->p_tx_cids) {
251 			DP_NOTICE(p_hwfn,
252 				  "Failed to allocate memory for Tx Cids\n");
253 			rc = -ENOMEM;
254 			goto alloc_err;
255 		}
256 
257 		p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
258 		if (!p_hwfn->p_rx_cids) {
259 			DP_NOTICE(p_hwfn,
260 				  "Failed to allocate memory for Rx Cids\n");
261 			rc = -ENOMEM;
262 			goto alloc_err;
263 		}
264 	}
265 
266 	for_each_hwfn(cdev, i) {
267 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
268 
269 		/* First allocate the context manager structure */
270 		rc = qed_cxt_mngr_alloc(p_hwfn);
271 		if (rc)
272 			goto alloc_err;
273 
		/* Set the HW cid/tid numbers (in the context manager)
275 		 * Must be done prior to any further computations.
276 		 */
277 		rc = qed_cxt_set_pf_params(p_hwfn);
278 		if (rc)
279 			goto alloc_err;
280 
281 		/* Prepare and process QM requirements */
282 		rc = qed_init_qm_info(p_hwfn);
283 		if (rc)
284 			goto alloc_err;
285 
286 		/* Compute the ILT client partition */
287 		rc = qed_cxt_cfg_ilt_compute(p_hwfn);
288 		if (rc)
289 			goto alloc_err;
290 
291 		/* CID map / ILT shadow table / T2
		 * The table sizes are determined by the computations above
293 		 */
294 		rc = qed_cxt_tables_alloc(p_hwfn);
295 		if (rc)
296 			goto alloc_err;
297 
		/* SPQ, must follow ILT because it initializes the SPQ context */
299 		rc = qed_spq_alloc(p_hwfn);
300 		if (rc)
301 			goto alloc_err;
302 
303 		/* SP status block allocation */
304 		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
305 							 RESERVED_PTT_DPC);
306 
307 		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
308 		if (rc)
309 			goto alloc_err;
310 
311 		/* EQ */
312 		p_eq = qed_eq_alloc(p_hwfn, 256);
313 		if (!p_eq) {
314 			rc = -ENOMEM;
315 			goto alloc_err;
316 		}
317 		p_hwfn->p_eq = p_eq;
318 
319 		p_consq = qed_consq_alloc(p_hwfn);
320 		if (!p_consq) {
321 			rc = -ENOMEM;
322 			goto alloc_err;
323 		}
324 		p_hwfn->p_consq = p_consq;
325 
326 		/* DMA info initialization */
327 		rc = qed_dmae_info_alloc(p_hwfn);
328 		if (rc) {
329 			DP_NOTICE(p_hwfn,
330 				  "Failed to allocate memory for dmae_info structure\n");
331 			goto alloc_err;
332 		}
333 	}
334 
335 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
336 	if (!cdev->reset_stats) {
337 		DP_NOTICE(cdev, "Failed to allocate reset statistics\n");
338 		rc = -ENOMEM;
339 		goto alloc_err;
340 	}
341 
342 	return 0;
343 
344 alloc_err:
345 	qed_resc_free(cdev);
346 	return rc;
347 }
348 
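/* Second-stage setup of the resources allocated in qed_resc_alloc(): bring
 * the context manager, SPQ, EQ and consumer queue to a clean state and take
 * an initial shadow copy of the MFW mailbox before interrupts are set up.
 */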
349 void qed_resc_setup(struct qed_dev *cdev)
350 {
351 	int i;
352 
353 	for_each_hwfn(cdev, i) {
354 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
355 
356 		qed_cxt_mngr_setup(p_hwfn);
357 		qed_spq_setup(p_hwfn);
358 		qed_eq_setup(p_hwfn, p_hwfn->p_eq);
359 		qed_consq_setup(p_hwfn, p_hwfn->p_consq);
360 
361 		/* Read shadow of current MFW mailbox */
362 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
363 		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
364 		       p_hwfn->mcp_info->mfw_mb_cur,
365 		       p_hwfn->mcp_info->mfw_mb_length);
366 
367 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
368 	}
369 }
370 
371 #define FINAL_CLEANUP_POLL_CNT          (100)
372 #define FINAL_CLEANUP_POLL_TIME         (10)
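/* Ask the firmware to perform a "final cleanup" of any state left behind by
 * a previous driver instance for function @id. The request is an SDM
 * aggregated-interrupt command written to XSDM_REG_OPERATION_GEN;
 * completion is signalled by a non-zero ack appearing at the ustorm
 * USTORM_FLR_FINAL_ACK_OFFSET location, which is polled for up to
 * FINAL_CLEANUP_POLL_CNT * FINAL_CLEANUP_POLL_TIME ms (about one second).
 */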
373 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
374 		      struct qed_ptt *p_ptt,
375 		      u16 id)
376 {
377 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
378 	int rc = -EBUSY;
379 
380 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
381 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
382 
383 	command |= X_FINAL_CLEANUP_AGG_INT <<
384 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
385 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
386 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
387 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
388 
389 	/* Make sure notification is not set before initiating final cleanup */
390 	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; Found final cleanup notification before initiating final cleanup\n");
394 		REG_WR(p_hwfn, addr, 0);
395 	}
396 
397 	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
399 		   id, command);
400 
401 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
402 
403 	/* Poll until completion */
404 	while (!REG_RD(p_hwfn, addr) && count--)
405 		msleep(FINAL_CLEANUP_POLL_TIME);
406 
407 	if (REG_RD(p_hwfn, addr))
408 		rc = 0;
409 	else
410 		DP_NOTICE(p_hwfn,
411 			  "Failed to receive FW final cleanup notification\n");
412 
413 	/* Cleanup afterwards */
414 	REG_WR(p_hwfn, addr, 0);
415 
416 	return rc;
417 }
418 
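/* Compute the hw_mode bit-mask consumed by the init-tool runs below: chip
 * revision (BB B0 only here), number of ports per engine, multi-function
 * mode (NPAR/default map to MF_SI, OVLAN to MF_SD) and MODE_ASIC.
 */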
419 static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
420 {
421 	int hw_mode = 0;
422 
423 	hw_mode = (1 << MODE_BB_B0);
424 
425 	switch (p_hwfn->cdev->num_ports_in_engines) {
426 	case 1:
427 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
428 		break;
429 	case 2:
430 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
431 		break;
432 	case 4:
433 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
434 		break;
435 	default:
436 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
437 			  p_hwfn->cdev->num_ports_in_engines);
438 		return;
439 	}
440 
441 	switch (p_hwfn->cdev->mf_mode) {
442 	case QED_MF_DEFAULT:
443 	case QED_MF_NPAR:
444 		hw_mode |= 1 << MODE_MF_SI;
445 		break;
446 	case QED_MF_OVLAN:
447 		hw_mode |= 1 << MODE_MF_SD;
448 		break;
449 	default:
450 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
451 		hw_mode |= 1 << MODE_MF_SI;
452 	}
453 
454 	hw_mode |= 1 << MODE_ASIC;
455 
456 	p_hwfn->hw_info.hw_mode = hw_mode;
457 }
458 
459 /* Init run time data for all PFs on an engine. */
460 static void qed_init_cau_rt_data(struct qed_dev *cdev)
461 {
462 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
463 	int i, sb_id;
464 
465 	for_each_hwfn(cdev, i) {
466 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
467 		struct qed_igu_info *p_igu_info;
468 		struct qed_igu_block *p_block;
469 		struct cau_sb_entry sb_entry;
470 
471 		p_igu_info = p_hwfn->hw_info.p_igu_info;
472 
473 		for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(cdev);
474 		     sb_id++) {
475 			p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
476 			if (!p_block->is_pf)
477 				continue;
478 
479 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
480 					      p_block->function_id,
481 					      0, 0);
482 			STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2,
483 					 sb_entry);
484 		}
485 	}
486 }
487 
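/* Engine-wide ("common") init phase, run only by the first function to load
 * on the engine: program the CAU runtime data for all PFs, the GTT windows
 * and the common QM runtime, close the NIG->BRB/Storm gates on both ports
 * (using port-pretend for the sibling port), and then run the init-tool
 * ENGINE phase.
 */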
488 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
489 			      struct qed_ptt *p_ptt,
490 			      int hw_mode)
491 {
492 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
493 	struct qed_qm_common_rt_init_params params;
494 	struct qed_dev *cdev = p_hwfn->cdev;
495 	int rc = 0;
496 
497 	qed_init_cau_rt_data(cdev);
498 
499 	/* Program GTT windows */
500 	qed_gtt_init(p_hwfn);
501 
502 	if (p_hwfn->mcp_info) {
503 		if (p_hwfn->mcp_info->func_info.bandwidth_max)
504 			qm_info->pf_rl_en = 1;
505 		if (p_hwfn->mcp_info->func_info.bandwidth_min)
506 			qm_info->pf_wfq_en = 1;
507 	}
508 
509 	memset(&params, 0, sizeof(params));
510 	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
511 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
512 	params.pf_rl_en = qm_info->pf_rl_en;
513 	params.pf_wfq_en = qm_info->pf_wfq_en;
514 	params.vport_rl_en = qm_info->vport_rl_en;
515 	params.vport_wfq_en = qm_info->vport_wfq_en;
516 	params.port_params = qm_info->qm_port_params;
517 
518 	qed_qm_common_rt_init(p_hwfn, &params);
519 
520 	qed_cxt_hw_init_common(p_hwfn);
521 
	/* Close gates from NIG to BRB/Storm; by default they are open, but
523 	 * we close them to prevent NIG from passing data to reset blocks.
524 	 * Should have been done in the ENGINE phase, but init-tool lacks
525 	 * proper port-pretend capabilities.
526 	 */
527 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
528 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
529 	qed_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
530 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
531 	qed_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
532 	qed_port_unpretend(p_hwfn, p_ptt);
533 
534 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
535 	if (rc != 0)
536 		return rc;
537 
538 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
539 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
540 
541 	/* Disable relaxed ordering in the PCI config space */
542 	qed_wr(p_hwfn, p_ptt, 0x20b4,
543 	       qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
544 
545 	return rc;
546 }
547 
548 static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
549 			    struct qed_ptt *p_ptt,
550 			    int hw_mode)
551 {
552 	int rc = 0;
553 
554 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
555 			  hw_mode);
556 	return rc;
557 }
558 
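/* Per-PF init phase: program the PF context and IGU runtime, configure NIG
 * classification (outer-VLAN tag in MF_SD mode, MAC in MF_SI mode), disable
 * the TCP/FCoE/RoCE parser searches, run a final cleanup for any previous
 * driver instance, execute the PF and QM_PF init-tool phases and, if
 * requested, enable interrupts and send the function-start ramrod.
 */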
559 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
560 			  struct qed_ptt *p_ptt,
561 			  int hw_mode,
562 			  bool b_hw_start,
563 			  enum qed_int_mode int_mode,
564 			  bool allow_npar_tx_switch)
565 {
566 	u8 rel_pf_id = p_hwfn->rel_pf_id;
567 	int rc = 0;
568 
569 	if (p_hwfn->mcp_info) {
570 		struct qed_mcp_function_info *p_info;
571 
572 		p_info = &p_hwfn->mcp_info->func_info;
573 		if (p_info->bandwidth_min)
574 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
575 
		/* Update the rate limit once we actually have a link */
577 		p_hwfn->qm_info.pf_rl = 100;
578 	}
579 
580 	qed_cxt_hw_init_pf(p_hwfn);
581 
582 	qed_int_igu_init_rt(p_hwfn);
583 
584 	/* Set VLAN in NIG if needed */
585 	if (hw_mode & (1 << MODE_MF_SD)) {
586 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
587 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
588 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
589 			     p_hwfn->hw_info.ovlan);
590 	}
591 
592 	/* Enable classification by MAC if needed */
593 	if (hw_mode & (1 << MODE_MF_SI)) {
594 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
595 			   "Configuring TAGMAC_CLS_TYPE\n");
596 		STORE_RT_REG(p_hwfn,
597 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
598 	}
599 
	/* Protocol configuration */
601 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
602 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
603 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
604 
	/* Clean up the chip from a previous driver if such remains exist */
606 	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
607 	if (rc != 0)
608 		return rc;
609 
610 	/* PF Init sequence */
611 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
612 	if (rc)
613 		return rc;
614 
615 	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
616 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
617 	if (rc)
618 		return rc;
619 
620 	/* Pure runtime initializations - directly to the HW  */
621 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
622 
623 	if (b_hw_start) {
624 		/* enable interrupts */
625 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
626 
627 		/* send function start command */
628 		rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
629 		if (rc)
630 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
631 	}
632 	return rc;
633 }
634 
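/* Enable or disable this PF as a master in the PXP (the callers use this to
 * turn DMAE on or off), then poll for up to one second (20000 x 50us) until
 * PGLUE_B reflects the new value.
 */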
635 static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
636 			       struct qed_ptt *p_ptt,
637 			       u8 enable)
638 {
639 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
640 
641 	/* Change PF in PXP */
642 	qed_wr(p_hwfn, p_ptt,
643 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
644 
645 	/* wait until value is set - try for 1 second every 50us */
646 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
647 		val = qed_rd(p_hwfn, p_ptt,
648 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
649 		if (val == set_val)
650 			break;
651 
652 		usleep_range(50, 60);
653 	}
654 
655 	if (val != set_val) {
656 		DP_NOTICE(p_hwfn,
657 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
658 		return -EAGAIN;
659 	}
660 
661 	return 0;
662 }
663 
664 static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
665 				struct qed_ptt *p_main_ptt)
666 {
667 	/* Read shadow of current MFW mailbox */
668 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
669 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
670 	       p_hwfn->mcp_info->mfw_mb_cur,
671 	       p_hwfn->mcp_info->mfw_mb_length);
672 }
673 
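/* Top-level HW/FW init. A LOAD_REQ is sent to the MFW for every
 * HW-function; the returned load_code tells this function how much of the
 * chip it is responsible for bringing up, and the switch below deliberately
 * falls through: the first function on the engine runs the ENGINE, PORT and
 * PF phases, the first function on a port runs the PORT and PF phases, and
 * any other function runs only the PF phase. LOAD_DONE is acked to the MFW
 * regardless of the outcome.
 */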
674 int qed_hw_init(struct qed_dev *cdev,
675 		bool b_hw_start,
676 		enum qed_int_mode int_mode,
677 		bool allow_npar_tx_switch,
678 		const u8 *bin_fw_data)
679 {
680 	u32 load_code, param;
681 	int rc, mfw_rc, i;
682 
683 	rc = qed_init_fw_data(cdev, bin_fw_data);
684 	if (rc != 0)
685 		return rc;
686 
687 	for_each_hwfn(cdev, i) {
688 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
689 
690 		/* Enable DMAE in PXP */
691 		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
692 
693 		qed_calc_hw_mode(p_hwfn);
694 
695 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
696 				      &load_code);
697 		if (rc) {
698 			DP_NOTICE(p_hwfn, "Failed sending LOAD_REQ command\n");
699 			return rc;
700 		}
701 
702 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
703 
704 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
705 			   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
706 			   rc, load_code);
707 
708 		p_hwfn->first_on_engine = (load_code ==
709 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
710 
711 		switch (load_code) {
712 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
713 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
714 						p_hwfn->hw_info.hw_mode);
715 			if (rc)
716 				break;
		/* Fall through */
718 		case FW_MSG_CODE_DRV_LOAD_PORT:
719 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
720 					      p_hwfn->hw_info.hw_mode);
721 			if (rc)
722 				break;
723 
		/* Fall through */
725 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
726 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
727 					    p_hwfn->hw_info.hw_mode,
728 					    b_hw_start, int_mode,
729 					    allow_npar_tx_switch);
730 			break;
731 		default:
732 			rc = -EINVAL;
733 			break;
734 		}
735 
736 		if (rc)
737 			DP_NOTICE(p_hwfn,
738 				  "init phase failed for loadcode 0x%x (rc %d)\n",
739 				   load_code, rc);
740 
741 		/* ACK mfw regardless of success or failure of initialization */
742 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
743 				     DRV_MSG_CODE_LOAD_DONE,
744 				     0, &load_code, &param);
745 		if (rc)
746 			return rc;
747 		if (mfw_rc) {
748 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
749 			return mfw_rc;
750 		}
751 
752 		p_hwfn->hw_init_done = true;
753 	}
754 
755 	return 0;
756 }
757 
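/* Stop the per-PF timer block: disable the connection and task timers and
 * wait, with 1-2ms sleeps between up to QED_HW_STOP_RETRY_LIMIT polls, for
 * the linear scans to drain before the function is torn down.
 */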
758 #define QED_HW_STOP_RETRY_LIMIT (10)
759 static inline void qed_hw_timers_stop(struct qed_dev *cdev,
760 				      struct qed_hwfn *p_hwfn,
761 				      struct qed_ptt *p_ptt)
762 {
763 	int i;
764 
765 	/* close timers */
766 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
767 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
768 
769 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
770 		if ((!qed_rd(p_hwfn, p_ptt,
771 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
772 		    (!qed_rd(p_hwfn, p_ptt,
773 			     TM_REG_PF_SCAN_ACTIVE_TASK)))
774 			break;
775 
		/* Depending on the number of connections/tasks, a 1ms sleep
		 * may be required between polls
		 */
779 		usleep_range(1000, 2000);
780 	}
781 
782 	if (i < QED_HW_STOP_RETRY_LIMIT)
783 		return;
784 
785 	DP_NOTICE(p_hwfn,
786 		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
787 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
788 		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
789 }
790 
791 void qed_hw_timers_stop_all(struct qed_dev *cdev)
792 {
793 	int j;
794 
795 	for_each_hwfn(cdev, j) {
796 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
797 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
798 
799 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
800 	}
801 }
802 
803 int qed_hw_stop(struct qed_dev *cdev)
804 {
805 	int rc = 0, t_rc;
806 	int j;
807 
808 	for_each_hwfn(cdev, j) {
809 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
810 		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
811 
812 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
813 
814 		/* mark the hw as uninitialized... */
815 		p_hwfn->hw_init_done = false;
816 
817 		rc = qed_sp_pf_stop(p_hwfn);
818 		if (rc)
819 			DP_NOTICE(p_hwfn,
820 				  "Failed to close PF against FW. Continue to stop HW to prevent illegal host access by the device\n");
821 
822 		qed_wr(p_hwfn, p_ptt,
823 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
824 
825 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
826 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
827 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
828 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
829 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
830 
831 		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
832 
833 		/* Disable Attention Generation */
834 		qed_int_igu_disable_int(p_hwfn, p_ptt);
835 
836 		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
837 		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
838 
839 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
840 
841 		/* Need to wait 1ms to guarantee SBs are cleared */
842 		usleep_range(1000, 2000);
843 	}
844 
845 	/* Disable DMAE in PXP - in CMT, this should only be done for
846 	 * first hw-function, and only after all transactions have
847 	 * stopped for all active hw-functions.
848 	 */
849 	t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
850 				   cdev->hwfns[0].p_main_ptt,
851 				   false);
852 	if (t_rc != 0)
853 		rc = t_rc;
854 
855 	return rc;
856 }
857 
858 void qed_hw_stop_fastpath(struct qed_dev *cdev)
859 {
860 	int j;
861 
862 	for_each_hwfn(cdev, j) {
863 		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
864 		struct qed_ptt *p_ptt   = p_hwfn->p_main_ptt;
865 
866 		DP_VERBOSE(p_hwfn,
867 			   NETIF_MSG_IFDOWN,
868 			   "Shutting down the fastpath\n");
869 
870 		qed_wr(p_hwfn, p_ptt,
871 		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
872 
873 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
874 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
875 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
876 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
877 		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
878 
879 		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
880 
881 		/* Need to wait 1ms to guarantee SBs are cleared */
882 		usleep_range(1000, 2000);
883 	}
884 }
885 
886 void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
887 {
888 	/* Re-open incoming traffic */
889 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
890 	       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
891 }
892 
893 static int qed_reg_assert(struct qed_hwfn *hwfn,
894 			  struct qed_ptt *ptt, u32 reg,
895 			  bool expected)
896 {
897 	u32 assert_val = qed_rd(hwfn, ptt, reg);
898 
899 	if (assert_val != expected) {
900 		DP_NOTICE(hwfn, "Value at address 0x%x != 0x%08x\n",
901 			  reg, expected);
902 		return -EINVAL;
903 	}
904 
905 	return 0;
906 }
907 
908 int qed_hw_reset(struct qed_dev *cdev)
909 {
910 	int rc = 0;
911 	u32 unload_resp, unload_param;
912 	int i;
913 
914 	for_each_hwfn(cdev, i) {
915 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
916 
917 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
918 
919 		/* Check for incorrect states */
920 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
921 			       QM_REG_USG_CNT_PF_TX, 0);
922 		qed_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
923 			       QM_REG_USG_CNT_PF_OTHER, 0);
924 
925 		/* Disable PF in HW blocks */
926 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
927 		qed_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
928 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
929 		       TCFC_REG_STRONG_ENABLE_PF, 0);
930 		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
931 		       CCFC_REG_STRONG_ENABLE_PF, 0);
932 
933 		/* Send unload command to MCP */
934 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
935 				 DRV_MSG_CODE_UNLOAD_REQ,
936 				 DRV_MB_PARAM_UNLOAD_WOL_MCP,
937 				 &unload_resp, &unload_param);
938 		if (rc) {
939 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_REQ failed\n");
940 			unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
941 		}
942 
943 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
944 				 DRV_MSG_CODE_UNLOAD_DONE,
945 				 0, &unload_resp, &unload_param);
946 		if (rc) {
947 			DP_NOTICE(p_hwfn, "qed_hw_reset: UNLOAD_DONE failed\n");
948 			return rc;
949 		}
950 	}
951 
952 	return rc;
953 }
954 
955 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
956 static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
957 {
958 	qed_ptt_pool_free(p_hwfn);
959 	kfree(p_hwfn->hw_info.p_igu_info);
960 }
961 
962 /* Setup bar access */
963 static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
964 {
965 	/* clear indirect access */
966 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
967 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
968 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
969 	qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
970 
971 	/* Clean Previous errors if such exist */
972 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
973 	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
974 	       1 << p_hwfn->abs_pf_id);
975 
976 	/* enable internal target-read */
977 	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
978 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
979 }
980 
981 static void get_function_id(struct qed_hwfn *p_hwfn)
982 {
983 	/* ME Register */
984 	p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR);
985 
986 	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
987 
988 	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
989 	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
990 				      PXP_CONCRETE_FID_PFID);
991 	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
992 				    PXP_CONCRETE_FID_PORT);
993 }
994 
995 static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
996 {
997 	u32 *feat_num = p_hwfn->hw_info.feat_num;
998 	int num_features = 1;
999 
1000 	feat_num[QED_PF_L2_QUE] = min_t(u32, RESC_NUM(p_hwfn, QED_SB) /
1001 						num_features,
1002 					RESC_NUM(p_hwfn, QED_L2_QUEUE));
1003 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1004 		   "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
1005 		   feat_num[QED_PF_L2_QUE], RESC_NUM(p_hwfn, QED_SB),
1006 		   num_features);
1007 }
1008 
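/* Resource provisioning is purely static here: each PF gets an equal
 * 1/MAX_NUM_PFS_BB share of the SBs (further capped by what the IGU
 * reports), L2 queues, vports, RSS engines, PQs and MAC/VLAN filters, plus
 * fixed RL and ILT allocations, with each per-PF range starting at
 * resc_num * rel_pf_id. Nothing is negotiated with the MFW at this stage.
 */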
1009 static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
1010 {
1011 	u32 *resc_start = p_hwfn->hw_info.resc_start;
1012 	u32 *resc_num = p_hwfn->hw_info.resc_num;
1013 	struct qed_sb_cnt_info sb_cnt_info;
1014 	int num_funcs, i;
1015 
1016 	num_funcs = MAX_NUM_PFS_BB;
1017 
1018 	memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
1019 	qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
1020 
1021 	resc_num[QED_SB] = min_t(u32,
1022 				 (MAX_SB_PER_PATH_BB / num_funcs),
1023 				 sb_cnt_info.sb_cnt);
1024 	resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs;
1025 	resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
1026 	resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
1027 	resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
1028 	resc_num[QED_RL] = 8;
1029 	resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
1030 	resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
1031 			     num_funcs;
1032 	resc_num[QED_ILT] = 950;
1033 
1034 	for (i = 0; i < QED_MAX_RESC; i++)
1035 		resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1036 
1037 	qed_hw_set_feat(p_hwfn);
1038 
1039 	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
1040 		   "The numbers for each resource are:\n"
1041 		   "SB = %d start = %d\n"
1042 		   "L2_QUEUE = %d start = %d\n"
1043 		   "VPORT = %d start = %d\n"
1044 		   "PQ = %d start = %d\n"
1045 		   "RL = %d start = %d\n"
1046 		   "MAC = %d start = %d\n"
1047 		   "VLAN = %d start = %d\n"
1048 		   "ILT = %d start = %d\n",
1049 		   p_hwfn->hw_info.resc_num[QED_SB],
1050 		   p_hwfn->hw_info.resc_start[QED_SB],
1051 		   p_hwfn->hw_info.resc_num[QED_L2_QUEUE],
1052 		   p_hwfn->hw_info.resc_start[QED_L2_QUEUE],
1053 		   p_hwfn->hw_info.resc_num[QED_VPORT],
1054 		   p_hwfn->hw_info.resc_start[QED_VPORT],
1055 		   p_hwfn->hw_info.resc_num[QED_PQ],
1056 		   p_hwfn->hw_info.resc_start[QED_PQ],
1057 		   p_hwfn->hw_info.resc_num[QED_RL],
1058 		   p_hwfn->hw_info.resc_start[QED_RL],
1059 		   p_hwfn->hw_info.resc_num[QED_MAC],
1060 		   p_hwfn->hw_info.resc_start[QED_MAC],
1061 		   p_hwfn->hw_info.resc_num[QED_VLAN],
1062 		   p_hwfn->hw_info.resc_start[QED_VLAN],
1063 		   p_hwfn->hw_info.resc_num[QED_ILT],
1064 		   p_hwfn->hw_info.resc_start[QED_ILT]);
1065 }
1066 
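/* Read the port mode, default link configuration, multi-function mode and
 * device capabilities from the nvm_cfg image exposed through the MCP
 * scratchpad: MISC_REG_GEN_PURP_CR0 holds the shmem address of nvm_cfg, and
 * the individual fields are located via offsetof() into struct nvm_cfg1.
 */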
1067 static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
1068 			       struct qed_ptt *p_ptt)
1069 {
1070 	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1071 	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
1072 	struct qed_mcp_link_params *link;
1073 
1074 	/* Read global nvm_cfg address */
1075 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1076 
1077 	/* Verify MCP has initialized it */
1078 	if (!nvm_cfg_addr) {
1079 		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
1080 		return -EINVAL;
1081 	}
1082 
	/* Read nvm_cfg1 (note this is just the offset, not the offsize (TBD)) */
1084 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1085 
1086 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1087 	       offsetof(struct nvm_cfg1, glob) +
1088 	       offsetof(struct nvm_cfg1_glob, core_cfg);
1089 
1090 	core_cfg = qed_rd(p_hwfn, p_ptt, addr);
1091 
1092 	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1093 		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1094 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1095 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
1096 		break;
1097 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1098 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
1099 		break;
1100 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1101 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
1102 		break;
1103 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1104 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
1105 		break;
1106 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1107 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
1108 		break;
1109 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1110 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
1111 		break;
1112 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1113 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
1114 		break;
1115 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1116 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
1117 		break;
1118 	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1119 		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
1120 		break;
1121 	default:
1122 		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n",
1123 			  core_cfg);
1124 		break;
1125 	}
1126 
1127 	/* Read default link configuration */
1128 	link = &p_hwfn->mcp_info->link_input;
1129 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1130 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1131 	link_temp = qed_rd(p_hwfn, p_ptt,
1132 			   port_cfg_addr +
1133 			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
1134 	link->speed.advertised_speeds =
1135 		link_temp & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1136 
1137 	p_hwfn->mcp_info->link_capabilities.speed_capabilities =
1138 						link->speed.advertised_speeds;
1139 
1140 	link_temp = qed_rd(p_hwfn, p_ptt,
1141 			   port_cfg_addr +
1142 			   offsetof(struct nvm_cfg1_port, link_settings));
1143 	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1144 		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1145 	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1146 		link->speed.autoneg = true;
1147 		break;
1148 	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1149 		link->speed.forced_speed = 1000;
1150 		break;
1151 	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1152 		link->speed.forced_speed = 10000;
1153 		break;
1154 	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1155 		link->speed.forced_speed = 25000;
1156 		break;
1157 	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1158 		link->speed.forced_speed = 40000;
1159 		break;
1160 	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1161 		link->speed.forced_speed = 50000;
1162 		break;
1163 	case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1164 		link->speed.forced_speed = 100000;
1165 		break;
1166 	default:
1167 		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n",
1168 			  link_temp);
1169 	}
1170 
1171 	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1172 	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1173 	link->pause.autoneg = !!(link_temp &
1174 				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1175 	link->pause.forced_rx = !!(link_temp &
1176 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1177 	link->pause.forced_tx = !!(link_temp &
1178 				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1179 	link->loopback_mode = 0;
1180 
1181 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1182 		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x\n",
1183 		   link->speed.forced_speed, link->speed.advertised_speeds,
1184 		   link->speed.autoneg, link->pause.autoneg);
1185 
1186 	/* Read Multi-function information from shmem */
1187 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1188 	       offsetof(struct nvm_cfg1, glob) +
1189 	       offsetof(struct nvm_cfg1_glob, generic_cont0);
1190 
1191 	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
1192 
1193 	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
1194 		  NVM_CFG1_GLOB_MF_MODE_OFFSET;
1195 
1196 	switch (mf_mode) {
1197 	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
1198 		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
1199 		break;
1200 	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
1201 		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
1202 		break;
1203 	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
1204 		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
1205 		break;
1206 	}
1207 	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
1208 		p_hwfn->cdev->mf_mode);
1209 
	/* Read device capabilities information from shmem */
1211 	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1212 		offsetof(struct nvm_cfg1, glob) +
1213 		offsetof(struct nvm_cfg1_glob, device_capabilities);
1214 
1215 	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
1216 	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
1217 		__set_bit(QED_DEV_CAP_ETH,
1218 			  &p_hwfn->hw_info.device_capabilities);
1219 
1220 	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
1221 }
1222 
1223 static int
1224 qed_get_hw_info(struct qed_hwfn *p_hwfn,
1225 		struct qed_ptt *p_ptt,
1226 		enum qed_pci_personality personality)
1227 {
1228 	u32 port_mode;
1229 	int rc;
1230 
1231 	/* Read the port mode */
1232 	port_mode = qed_rd(p_hwfn, p_ptt,
1233 			   CNIG_REG_NW_PORT_MODE_BB_B0);
1234 
1235 	if (port_mode < 3) {
1236 		p_hwfn->cdev->num_ports_in_engines = 1;
1237 	} else if (port_mode <= 5) {
1238 		p_hwfn->cdev->num_ports_in_engines = 2;
1239 	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", port_mode);
1242 
1243 		/* Default num_ports_in_engines to something */
1244 		p_hwfn->cdev->num_ports_in_engines = 1;
1245 	}
1246 
1247 	qed_hw_get_nvm_info(p_hwfn, p_ptt);
1248 
1249 	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
1250 	if (rc)
1251 		return rc;
1252 
1253 	if (qed_mcp_is_init(p_hwfn))
1254 		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
1255 				p_hwfn->mcp_info->func_info.mac);
1256 	else
1257 		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
1258 
1259 	if (qed_mcp_is_init(p_hwfn)) {
1260 		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
1261 			p_hwfn->hw_info.ovlan =
1262 				p_hwfn->mcp_info->func_info.ovlan;
1263 
1264 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
1265 	}
1266 
1267 	if (qed_mcp_is_init(p_hwfn)) {
1268 		enum qed_pci_personality protocol;
1269 
1270 		protocol = p_hwfn->mcp_info->func_info.protocol;
1271 		p_hwfn->hw_info.personality = protocol;
1272 	}
1273 
1274 	qed_hw_get_resc(p_hwfn);
1275 
1276 	return rc;
1277 }
1278 
1279 static int qed_get_dev_info(struct qed_dev *cdev)
1280 {
1281 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1282 	u32 tmp;
1283 
1284 	/* Read Vendor Id / Device Id */
1285 	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID,
1286 			     &cdev->vendor_id);
1287 	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID,
1288 			     &cdev->device_id);
1289 	cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1290 				     MISCS_REG_CHIP_NUM);
1291 	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1292 				     MISCS_REG_CHIP_REV);
1293 	MASK_FIELD(CHIP_REV, cdev->chip_rev);
1294 
1295 	cdev->type = QED_DEV_TYPE_BB;
1296 	/* Learn number of HW-functions */
1297 	tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1298 		     MISCS_REG_CMT_ENABLED_FOR_PAIR);
1299 
1300 	if (tmp & (1 << p_hwfn->rel_pf_id)) {
1301 		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
1302 		cdev->num_hwfns = 2;
1303 	} else {
1304 		cdev->num_hwfns = 1;
1305 	}
1306 
1307 	cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1308 				    MISCS_REG_CHIP_TEST_REG) >> 4;
1309 	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
1310 	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt,
1311 				       MISCS_REG_CHIP_METAL);
1312 	MASK_FIELD(CHIP_METAL, cdev->chip_metal);
1313 
1314 	DP_INFO(cdev->hwfns,
1315 		"Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
1316 		cdev->chip_num, cdev->chip_rev,
1317 		cdev->chip_bond_id, cdev->chip_metal);
1318 
1319 	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
1320 		DP_NOTICE(cdev->hwfns,
1321 			  "The chip type/rev (BB A0) is not supported!\n");
1322 		return -EINVAL;
1323 	}
1324 
1325 	return 0;
1326 }
1327 
1328 static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
1329 				 void __iomem *p_regview,
1330 				 void __iomem *p_doorbells,
1331 				 enum qed_pci_personality personality)
1332 {
1333 	int rc = 0;
1334 
1335 	/* Split PCI bars evenly between hwfns */
1336 	p_hwfn->regview = p_regview;
1337 	p_hwfn->doorbells = p_doorbells;
1338 
1339 	/* Validate that chip access is feasible */
1340 	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
1341 		DP_ERR(p_hwfn,
1342 		       "Reading the ME register returns all Fs; Preventing further chip access\n");
1343 		return -EINVAL;
1344 	}
1345 
1346 	get_function_id(p_hwfn);
1347 
1348 	/* Allocate PTT pool */
1349 	rc = qed_ptt_pool_alloc(p_hwfn);
1350 	if (rc) {
1351 		DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n");
1352 		goto err0;
1353 	}
1354 
1355 	/* Allocate the main PTT */
1356 	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
1357 
1358 	/* First hwfn learns basic information, e.g., number of hwfns */
1359 	if (!p_hwfn->my_id) {
1360 		rc = qed_get_dev_info(p_hwfn->cdev);
1361 		if (rc != 0)
1362 			goto err1;
1363 	}
1364 
1365 	qed_hw_hwfn_prepare(p_hwfn);
1366 
1367 	/* Initialize MCP structure */
1368 	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
1369 	if (rc) {
1370 		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
1371 		goto err1;
1372 	}
1373 
1374 	/* Read the device configuration information from the HW and SHMEM */
1375 	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
1376 	if (rc) {
1377 		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
1378 		goto err2;
1379 	}
1380 
1381 	/* Allocate the init RT array and initialize the init-ops engine */
1382 	rc = qed_init_alloc(p_hwfn);
1383 	if (rc) {
1384 		DP_NOTICE(p_hwfn, "Failed to allocate the init array\n");
1385 		goto err2;
1386 	}
1387 
1388 	return rc;
1389 err2:
1390 	qed_mcp_free(p_hwfn);
1391 err1:
1392 	qed_hw_hwfn_free(p_hwfn);
1393 err0:
1394 	return rc;
1395 }
1396 
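/* Prepare the device for use: map the leading HW-function over the first
 * half of the register and doorbell BARs and learn the number of
 * HW-functions from it; in CMT (two functions per device) the second
 * function is then prepared over the second half of each BAR.
 */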
1397 int qed_hw_prepare(struct qed_dev *cdev,
1398 		   int personality)
1399 {
1400 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
1401 	int rc;
1402 
1403 	/* Store the precompiled init data ptrs */
1404 	qed_init_iro_array(cdev);
1405 
1406 	/* Initialize the first hwfn - will learn number of hwfns */
1407 	rc = qed_hw_prepare_single(p_hwfn,
1408 				   cdev->regview,
1409 				   cdev->doorbells, personality);
1410 	if (rc)
1411 		return rc;
1412 
1413 	personality = p_hwfn->hw_info.personality;
1414 
1415 	/* Initialize the rest of the hwfns */
1416 	if (cdev->num_hwfns > 1) {
1417 		void __iomem *p_regview, *p_doorbell;
1418 		u8 __iomem *addr;
1419 
1420 		/* adjust bar offset for second engine */
1421 		addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
1422 		p_regview = addr;
1423 
1424 		/* adjust doorbell bar offset for second engine */
1425 		addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
1426 		p_doorbell = addr;
1427 
1428 		/* prepare second hw function */
1429 		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
1430 					   p_doorbell, personality);
1431 
		/* In case of error, need to free the previously
		 * initialized hwfn 0.
		 */
1435 		if (rc) {
1436 			qed_init_free(p_hwfn);
1437 			qed_mcp_free(p_hwfn);
1438 			qed_hw_hwfn_free(p_hwfn);
1439 		}
1440 	}
1441 
1442 	return rc;
1443 }
1444 
1445 void qed_hw_remove(struct qed_dev *cdev)
1446 {
1447 	int i;
1448 
1449 	for_each_hwfn(cdev, i) {
1450 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1451 
1452 		qed_init_free(p_hwfn);
1453 		qed_hw_hwfn_free(p_hwfn);
1454 		qed_mcp_free(p_hwfn);
1455 	}
1456 }
1457 
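/* Allocate the DMA-coherent memory backing a qed_chain. SINGLE mode uses a
 * single page; the other chain modes use QED_CHAIN_PAGE_CNT() pages, and
 * PBL mode additionally allocates the page-base-list table the chain walks.
 * A rough usage sketch (the element type and sizes are placeholders for
 * illustration only):
 *
 *	struct qed_chain chain;
 *	int rc;
 *
 *	rc = qed_chain_alloc(cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE,
 *			     QED_CHAIN_MODE_PBL, 256,
 *			     sizeof(struct some_ring_elem), &chain);
 *	if (!rc) {
 *		...
 *		qed_chain_free(cdev, &chain);
 *	}
 */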
1458 int qed_chain_alloc(struct qed_dev *cdev,
1459 		    enum qed_chain_use_mode intended_use,
1460 		    enum qed_chain_mode mode,
1461 		    u16 num_elems,
1462 		    size_t elem_size,
1463 		    struct qed_chain *p_chain)
1464 {
1465 	dma_addr_t p_pbl_phys = 0;
1466 	void *p_pbl_virt = NULL;
1467 	dma_addr_t p_phys = 0;
1468 	void *p_virt = NULL;
1469 	u16 page_cnt = 0;
1470 	size_t size;
1471 
1472 	if (mode == QED_CHAIN_MODE_SINGLE)
1473 		page_cnt = 1;
1474 	else
1475 		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
1476 
1477 	size = page_cnt * QED_CHAIN_PAGE_SIZE;
1478 	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
1479 				    size, &p_phys, GFP_KERNEL);
1480 	if (!p_virt) {
1481 		DP_NOTICE(cdev, "Failed to allocate chain mem\n");
1482 		goto nomem;
1483 	}
1484 
1485 	if (mode == QED_CHAIN_MODE_PBL) {
1486 		size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1487 		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
1488 						size, &p_pbl_phys,
1489 						GFP_KERNEL);
1490 		if (!p_pbl_virt) {
1491 			DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
1492 			goto nomem;
1493 		}
1494 
1495 		qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
1496 				   (u8)elem_size, intended_use,
1497 				   p_pbl_phys, p_pbl_virt);
1498 	} else {
1499 		qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
1500 			       (u8)elem_size, intended_use, mode);
1501 	}
1502 
1503 	return 0;
1504 
1505 nomem:
1506 	dma_free_coherent(&cdev->pdev->dev,
1507 			  page_cnt * QED_CHAIN_PAGE_SIZE,
1508 			  p_virt, p_phys);
1509 	dma_free_coherent(&cdev->pdev->dev,
1510 			  page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
1511 			  p_pbl_virt, p_pbl_phys);
1512 
1513 	return -ENOMEM;
1514 }
1515 
1516 void qed_chain_free(struct qed_dev *cdev,
1517 		    struct qed_chain *p_chain)
1518 {
1519 	size_t size;
1520 
1521 	if (!p_chain->p_virt_addr)
1522 		return;
1523 
1524 	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
1525 		size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
1526 		dma_free_coherent(&cdev->pdev->dev, size,
1527 				  p_chain->pbl.p_virt_table,
1528 				  p_chain->pbl.p_phys_table);
1529 	}
1530 
1531 	size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
1532 	dma_free_coherent(&cdev->pdev->dev, size,
1533 			  p_chain->p_virt_addr,
1534 			  p_chain->p_phys_addr);
1535 }
1536 
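/* The three helpers below translate a PF-relative resource index (L2 queue,
 * vport, RSS engine) into the absolute, FW-visible index by adding the
 * per-PF RESC_START() base, after validating the index against RESC_NUM().
 */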
1537 int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
1538 		    u16 src_id, u16 *dst_id)
1539 {
1540 	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
1541 		u16 min, max;
1542 
1543 		min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
1544 		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
1545 		DP_NOTICE(p_hwfn,
1546 			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
1547 			  src_id, min, max);
1548 
1549 		return -EINVAL;
1550 	}
1551 
1552 	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
1553 
1554 	return 0;
1555 }
1556 
1557 int qed_fw_vport(struct qed_hwfn *p_hwfn,
1558 		 u8 src_id, u8 *dst_id)
1559 {
1560 	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
1561 		u8 min, max;
1562 
1563 		min = (u8)RESC_START(p_hwfn, QED_VPORT);
1564 		max = min + RESC_NUM(p_hwfn, QED_VPORT);
1565 		DP_NOTICE(p_hwfn,
1566 			  "vport id [%d] is not valid, available indices [%d - %d]\n",
1567 			  src_id, min, max);
1568 
1569 		return -EINVAL;
1570 	}
1571 
1572 	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
1573 
1574 	return 0;
1575 }
1576 
1577 int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
1578 		   u8 src_id, u8 *dst_id)
1579 {
1580 	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
1581 		u8 min, max;
1582 
1583 		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
1584 		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
1585 		DP_NOTICE(p_hwfn,
1586 			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
1587 			  src_id, min, max);
1588 
1589 		return -EINVAL;
1590 	}
1591 
1592 	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
1593 
1594 	return 0;
1595 }
1596