1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <linux/io.h>
36 #include <linux/delay.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/mutex.h>
41 #include <linux/pci.h>
42 #include <linux/slab.h>
43 #include <linux/string.h>
44 #include <linux/vmalloc.h>
45 #include <linux/etherdevice.h>
46 #include <linux/qed/qed_chain.h>
47 #include <linux/qed/qed_if.h>
48 #include "qed.h"
49 #include "qed_cxt.h"
50 #include "qed_dcbx.h"
51 #include "qed_dev_api.h"
52 #include "qed_fcoe.h"
53 #include "qed_hsi.h"
54 #include "qed_hw.h"
55 #include "qed_init_ops.h"
56 #include "qed_int.h"
57 #include "qed_iscsi.h"
58 #include "qed_ll2.h"
59 #include "qed_mcp.h"
60 #include "qed_ooo.h"
61 #include "qed_reg_addr.h"
62 #include "qed_sp.h"
63 #include "qed_sriov.h"
64 #include "qed_vf.h"
65 #include "qed_rdma.h"
66 
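/* Serializes the QM start/stop commands issued to the device
 * (see qed_qm_reconf()).
 */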
67 static DEFINE_SPINLOCK(qm_lock);
68 
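/* Smallest PWM doorbell region we can work with: QED_MIN_DPIS doorbell
 * pages of (at least) one WID each.
 */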
69 #define QED_MIN_DPIS            (4)
70 #define QED_MIN_PWM_REGION      (QED_WID_SIZE * QED_MIN_DPIS)
71 
72 static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
73 			   struct qed_ptt *p_ptt, enum BAR_ID bar_id)
74 {
75 	u32 bar_reg = (bar_id == BAR_ID_0 ?
76 		       PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
77 	u32 val;
78 
79 	if (IS_VF(p_hwfn->cdev))
80 		return qed_vf_hw_bar_size(p_hwfn, bar_id);
81 
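	/* The register encodes the BAR size as a power-of-two exponent with a
	 * 32kB base, i.e. size = 1 << (val + 15); e.g. val == 4 decodes to
	 * 512kB.
	 */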
82 	val = qed_rd(p_hwfn, p_ptt, bar_reg);
83 	if (val)
84 		return 1 << (val + 15);
85 
	/* Old MFW initialized the above register only conditionally */
	if (p_hwfn->cdev->num_hwfns > 1) {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
		return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
	} else {
		DP_INFO(p_hwfn,
			"BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
		return 512 * 1024;
	}
96 }
97 
98 void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
99 {
100 	u32 i;
101 
102 	cdev->dp_level = dp_level;
103 	cdev->dp_module = dp_module;
104 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
105 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
106 
107 		p_hwfn->dp_level = dp_level;
108 		p_hwfn->dp_module = dp_module;
109 	}
110 }
111 
112 void qed_init_struct(struct qed_dev *cdev)
113 {
114 	u8 i;
115 
116 	for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
117 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
118 
119 		p_hwfn->cdev = cdev;
120 		p_hwfn->my_id = i;
121 		p_hwfn->b_active = false;
122 
123 		mutex_init(&p_hwfn->dmae_info.mutex);
124 	}
125 
126 	/* hwfn 0 is always active */
127 	cdev->hwfns[0].b_active = true;
128 
129 	/* set the default cache alignment to 128 */
130 	cdev->cache_shift = 7;
131 }
132 
133 static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
134 {
135 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
136 
137 	kfree(qm_info->qm_pq_params);
138 	qm_info->qm_pq_params = NULL;
139 	kfree(qm_info->qm_vport_params);
140 	qm_info->qm_vport_params = NULL;
141 	kfree(qm_info->qm_port_params);
142 	qm_info->qm_port_params = NULL;
143 	kfree(qm_info->wfq_data);
144 	qm_info->wfq_data = NULL;
145 }
146 
147 void qed_resc_free(struct qed_dev *cdev)
148 {
149 	int i;
150 
151 	if (IS_VF(cdev)) {
152 		for_each_hwfn(cdev, i)
153 			qed_l2_free(&cdev->hwfns[i]);
154 		return;
155 	}
156 
157 	kfree(cdev->fw_data);
158 	cdev->fw_data = NULL;
159 
160 	kfree(cdev->reset_stats);
161 	cdev->reset_stats = NULL;
162 
163 	for_each_hwfn(cdev, i) {
164 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
165 
166 		qed_cxt_mngr_free(p_hwfn);
167 		qed_qm_info_free(p_hwfn);
168 		qed_spq_free(p_hwfn);
169 		qed_eq_free(p_hwfn);
170 		qed_consq_free(p_hwfn);
171 		qed_int_free(p_hwfn);
172 #ifdef CONFIG_QED_LL2
173 		qed_ll2_free(p_hwfn);
174 #endif
175 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
176 			qed_fcoe_free(p_hwfn);
177 
178 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
179 			qed_iscsi_free(p_hwfn);
180 			qed_ooo_free(p_hwfn);
181 		}
182 		qed_iov_free(p_hwfn);
183 		qed_l2_free(p_hwfn);
184 		qed_dmae_info_free(p_hwfn);
185 		qed_dcbx_info_free(p_hwfn);
186 	}
187 }
188 
189 /******************** QM initialization *******************/
190 #define ACTIVE_TCS_BMAP 0x9f
191 #define ACTIVE_TCS_BMAP_4PORT_K2 0xf
192 
193 /* determines the physical queue flags for a given PF. */
194 static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
195 {
196 	u32 flags;
197 
198 	/* common flags */
199 	flags = PQ_FLAGS_LB;
200 
201 	/* feature flags */
202 	if (IS_QED_SRIOV(p_hwfn->cdev))
203 		flags |= PQ_FLAGS_VFS;
204 
205 	/* protocol flags */
206 	switch (p_hwfn->hw_info.personality) {
207 	case QED_PCI_ETH:
208 		flags |= PQ_FLAGS_MCOS;
209 		break;
210 	case QED_PCI_FCOE:
211 		flags |= PQ_FLAGS_OFLD;
212 		break;
213 	case QED_PCI_ISCSI:
214 		flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
215 		break;
216 	case QED_PCI_ETH_ROCE:
217 		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
218 		break;
219 	case QED_PCI_ETH_IWARP:
220 		flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
221 		    PQ_FLAGS_OFLD;
222 		break;
223 	default:
224 		DP_ERR(p_hwfn,
225 		       "unknown personality %d\n", p_hwfn->hw_info.personality);
226 		return 0;
227 	}
228 
229 	return flags;
230 }
231 
232 /* Getters for resource amounts necessary for qm initialization */
233 u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
234 {
235 	return p_hwfn->hw_info.num_hw_tc;
236 }
237 
238 u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
239 {
240 	return IS_QED_SRIOV(p_hwfn->cdev) ?
241 	       p_hwfn->cdev->p_iov_info->total_vfs : 0;
242 }
243 
244 #define NUM_DEFAULT_RLS 1
245 
246 u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
247 {
248 	u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
249 
250 	/* num RLs can't exceed resource amount of rls or vports */
251 	num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
252 				 RESC_NUM(p_hwfn, QED_VPORT));
253 
254 	/* Make sure after we reserve there's something left */
255 	if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
256 		return 0;
257 
258 	/* subtract rls necessary for VFs and one default one for the PF */
259 	num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
260 
261 	return num_pf_rls;
262 }
263 
264 u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
265 {
266 	u32 pq_flags = qed_get_pq_flags(p_hwfn);
267 
268 	/* all pqs share the same vport, except for vfs and pf_rl pqs */
269 	return (!!(PQ_FLAGS_RLS & pq_flags)) *
270 	       qed_init_qm_get_num_pf_rls(p_hwfn) +
271 	       (!!(PQ_FLAGS_VFS & pq_flags)) *
272 	       qed_init_qm_get_num_vfs(p_hwfn) + 1;
273 }
274 
275 /* calc amount of PQs according to the requested flags */
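/* For example, an ETH_ROCE personality with 4 hardware TCs and SR-IOV
 * disabled has flags LB | MCOS | OFLD | LLT, i.e. 1 + 4 + 1 + 1 = 7 PQs.
 */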
276 u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
277 {
278 	u32 pq_flags = qed_get_pq_flags(p_hwfn);
279 
280 	return (!!(PQ_FLAGS_RLS & pq_flags)) *
281 	       qed_init_qm_get_num_pf_rls(p_hwfn) +
282 	       (!!(PQ_FLAGS_MCOS & pq_flags)) *
283 	       qed_init_qm_get_num_tcs(p_hwfn) +
284 	       (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
285 	       (!!(PQ_FLAGS_ACK & pq_flags)) + (!!(PQ_FLAGS_OFLD & pq_flags)) +
286 	       (!!(PQ_FLAGS_LLT & pq_flags)) +
287 	       (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
288 }
289 
290 /* initialize the top level QM params */
291 static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
292 {
293 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
294 	bool four_port;
295 
296 	/* pq and vport bases for this PF */
297 	qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
298 	qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
299 
300 	/* rate limiting and weighted fair queueing are always enabled */
301 	qm_info->vport_rl_en = 1;
302 	qm_info->vport_wfq_en = 1;
303 
304 	/* TC config is different for AH 4 port */
305 	four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;
306 
307 	/* in AH 4 port we have fewer TCs per port */
308 	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
309 						     NUM_OF_PHYS_TCS;
310 
311 	/* unless MFW indicated otherwise, ooo_tc == 3 for
312 	 * AH 4-port and 4 otherwise.
313 	 */
314 	if (!qm_info->ooo_tc)
315 		qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
316 					      DCBX_TCP_OOO_TC;
317 }
318 
319 /* initialize qm vport params */
320 static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
321 {
322 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
323 	u8 i;
324 
325 	/* all vports participate in weighted fair queueing */
326 	for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
327 		qm_info->qm_vport_params[i].vport_wfq = 1;
328 }
329 
330 /* initialize qm port params */
331 static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
332 {
333 	/* Initialize qm port parameters */
334 	u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
335 
336 	/* indicate how ooo and high pri traffic is dealt with */
337 	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
338 			  ACTIVE_TCS_BMAP_4PORT_K2 :
339 			  ACTIVE_TCS_BMAP;
340 
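	/* Split the PBF command lines and BTB blocks evenly between the
	 * engine's ports.
	 */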
341 	for (i = 0; i < num_ports; i++) {
342 		struct init_qm_port_params *p_qm_port =
343 		    &p_hwfn->qm_info.qm_port_params[i];
344 
345 		p_qm_port->active = 1;
346 		p_qm_port->active_phys_tcs = active_phys_tcs;
347 		p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
348 		p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
349 	}
350 }
351 
352 /* Reset the params which must be reset for qm init. QM init may be called as
353  * a result of flows other than driver load (e.g. dcbx renegotiation). Other
354  * params may be affected by the init but would simply recalculate to the same
355  * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
356  * affected as these amounts stay the same.
357  */
358 static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
359 {
360 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
361 
362 	qm_info->num_pqs = 0;
363 	qm_info->num_vports = 0;
364 	qm_info->num_pf_rls = 0;
365 	qm_info->num_vf_pqs = 0;
366 	qm_info->first_vf_pq = 0;
367 	qm_info->first_mcos_pq = 0;
368 	qm_info->first_rl_pq = 0;
369 }
370 
371 static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
372 {
373 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
374 
375 	qm_info->num_vports++;
376 
377 	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
378 		DP_ERR(p_hwfn,
379 		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
380 		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
381 }
382 
383 /* initialize a single pq and manage qm_info resources accounting.
384  * The pq_init_flags param determines whether the PQ is rate limited
385  * (for VF or PF) and whether a new vport is allocated to the pq or not
386  * (i.e. vport will be shared).
387  */
388 
389 /* flags for pq init */
390 #define PQ_INIT_SHARE_VPORT     (1 << 0)
391 #define PQ_INIT_PF_RL           (1 << 1)
392 #define PQ_INIT_VF_RL           (1 << 2)
393 
394 /* defines for pq init */
395 #define PQ_INIT_DEFAULT_WRR_GROUP       1
396 #define PQ_INIT_DEFAULT_TC              0
397 #define PQ_INIT_OFLD_TC                 (p_hwfn->hw_info.offload_tc)
398 
399 static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
400 			   struct qed_qm_info *qm_info,
401 			   u8 tc, u32 pq_init_flags)
402 {
403 	u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);
404 
	if (pq_idx >= max_pq)
406 		DP_ERR(p_hwfn,
407 		       "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
408 
409 	/* init pq params */
410 	qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
411 	    qm_info->num_vports;
412 	qm_info->qm_pq_params[pq_idx].tc_id = tc;
413 	qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
414 	qm_info->qm_pq_params[pq_idx].rl_valid =
415 	    (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
416 
417 	/* qm params accounting */
418 	qm_info->num_pqs++;
419 	if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
420 		qm_info->num_vports++;
421 
422 	if (pq_init_flags & PQ_INIT_PF_RL)
423 		qm_info->num_pf_rls++;
424 
425 	if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
426 		DP_ERR(p_hwfn,
427 		       "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
428 		       qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
429 
430 	if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
431 		DP_ERR(p_hwfn,
432 		       "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
433 		       qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
434 }
435 
436 /* get pq index according to PQ_FLAGS */
437 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
438 					   u32 pq_flags)
439 {
440 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
441 
442 	/* Can't have multiple flags set here */
	if (bitmap_weight((unsigned long *)&pq_flags,
			  sizeof(pq_flags) * BITS_PER_BYTE) > 1)
		goto err;
445 
446 	switch (pq_flags) {
447 	case PQ_FLAGS_RLS:
448 		return &qm_info->first_rl_pq;
449 	case PQ_FLAGS_MCOS:
450 		return &qm_info->first_mcos_pq;
451 	case PQ_FLAGS_LB:
452 		return &qm_info->pure_lb_pq;
453 	case PQ_FLAGS_OOO:
454 		return &qm_info->ooo_pq;
455 	case PQ_FLAGS_ACK:
456 		return &qm_info->pure_ack_pq;
457 	case PQ_FLAGS_OFLD:
458 		return &qm_info->offload_pq;
459 	case PQ_FLAGS_LLT:
460 		return &qm_info->low_latency_pq;
461 	case PQ_FLAGS_VFS:
462 		return &qm_info->first_vf_pq;
463 	default:
464 		goto err;
465 	}
466 
467 err:
468 	DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
469 	return NULL;
470 }
471 
472 /* save pq index in qm info */
473 static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
474 				u32 pq_flags, u16 pq_val)
475 {
476 	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
477 
478 	*base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
479 }
480 
481 /* get tx pq index, with the PQ TX base already set (ready for context init) */
482 u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
483 {
484 	u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
485 
486 	return *base_pq_idx + CM_TX_PQ_BASE;
487 }
488 
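/* Illustrative use of the helpers below (the caller is an assumption, not
 * taken from this file): an L2 TX-queue start flow would resolve its
 * absolute CM PQ with qed_get_cm_pq_idx_mcos(p_hwfn, tc) and program the
 * result into the queue's transmission context.
 */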
489 u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
490 {
491 	u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
492 
	if (tc >= max_tc)
494 		DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
495 
496 	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
497 }
498 
499 u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
500 {
501 	u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
502 
	if (vf >= max_vf)
504 		DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
505 
506 	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
507 }
508 
509 u16 qed_get_cm_pq_idx_rl(struct qed_hwfn *p_hwfn, u8 rl)
510 {
511 	u16 max_rl = qed_init_qm_get_num_pf_rls(p_hwfn);
512 
	if (rl >= max_rl)
514 		DP_ERR(p_hwfn, "rl %d must be smaller than %d\n", rl, max_rl);
515 
516 	return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_RLS) + rl;
517 }
518 
519 /* Functions for creating specific types of pqs */
520 static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
521 {
522 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
523 
524 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
525 		return;
526 
527 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
528 	qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
529 }
530 
531 static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
532 {
533 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
534 
535 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
536 		return;
537 
538 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
539 	qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
540 }
541 
542 static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
543 {
544 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
545 
546 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
547 		return;
548 
549 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
550 	qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
551 }
552 
553 static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
554 {
555 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
556 
557 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
558 		return;
559 
560 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
561 	qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
562 }
563 
564 static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
565 {
566 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
567 
568 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
569 		return;
570 
571 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
572 	qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_SHARE_VPORT);
573 }
574 
575 static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
576 {
577 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
578 	u8 tc_idx;
579 
580 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
581 		return;
582 
583 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
584 	for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
585 		qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
586 }
587 
588 static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
589 {
590 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
591 	u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
592 
593 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
594 		return;
595 
596 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
597 	qm_info->num_vf_pqs = num_vfs;
598 	for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
599 		qed_init_qm_pq(p_hwfn,
600 			       qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
601 }
602 
603 static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
604 {
605 	u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
606 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
607 
608 	if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
609 		return;
610 
611 	qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
612 	for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
613 		qed_init_qm_pq(p_hwfn, qm_info, PQ_INIT_OFLD_TC, PQ_INIT_PF_RL);
614 }
615 
616 static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
617 {
618 	/* rate limited pqs, must come first (FW assumption) */
619 	qed_init_qm_rl_pqs(p_hwfn);
620 
621 	/* pqs for multi cos */
622 	qed_init_qm_mcos_pqs(p_hwfn);
623 
624 	/* pure loopback pq */
625 	qed_init_qm_lb_pq(p_hwfn);
626 
627 	/* out of order pq */
628 	qed_init_qm_ooo_pq(p_hwfn);
629 
630 	/* pure ack pq */
631 	qed_init_qm_pure_ack_pq(p_hwfn);
632 
633 	/* pq for offloaded protocol */
634 	qed_init_qm_offload_pq(p_hwfn);
635 
636 	/* low latency pq */
637 	qed_init_qm_low_latency_pq(p_hwfn);
638 
639 	/* done sharing vports */
640 	qed_init_qm_advance_vport(p_hwfn);
641 
642 	/* pqs for vfs */
643 	qed_init_qm_vf_pqs(p_hwfn);
644 }
645 
646 /* compare values of getters against resources amounts */
647 static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
648 {
649 	if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
650 		DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
651 		return -EINVAL;
652 	}
653 
654 	if (qed_init_qm_get_num_pqs(p_hwfn) > RESC_NUM(p_hwfn, QED_PQ)) {
655 		DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
656 		return -EINVAL;
657 	}
658 
659 	return 0;
660 }
661 
662 static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
663 {
664 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
665 	struct init_qm_vport_params *vport;
666 	struct init_qm_port_params *port;
667 	struct init_qm_pq_params *pq;
668 	int i, tc;
669 
670 	/* top level params */
671 	DP_VERBOSE(p_hwfn,
672 		   NETIF_MSG_HW,
673 		   "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, pure_ack_pq %d\n",
674 		   qm_info->start_pq,
675 		   qm_info->start_vport,
676 		   qm_info->pure_lb_pq,
677 		   qm_info->offload_pq, qm_info->pure_ack_pq);
678 	DP_VERBOSE(p_hwfn,
679 		   NETIF_MSG_HW,
680 		   "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
681 		   qm_info->ooo_pq,
682 		   qm_info->first_vf_pq,
683 		   qm_info->num_pqs,
684 		   qm_info->num_vf_pqs,
685 		   qm_info->num_vports, qm_info->max_phys_tcs_per_port);
686 	DP_VERBOSE(p_hwfn,
687 		   NETIF_MSG_HW,
688 		   "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
689 		   qm_info->pf_rl_en,
690 		   qm_info->pf_wfq_en,
691 		   qm_info->vport_rl_en,
692 		   qm_info->vport_wfq_en,
693 		   qm_info->pf_wfq,
694 		   qm_info->pf_rl,
695 		   qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
696 
697 	/* port table */
698 	for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
699 		port = &(qm_info->qm_port_params[i]);
700 		DP_VERBOSE(p_hwfn,
701 			   NETIF_MSG_HW,
702 			   "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
703 			   i,
704 			   port->active,
705 			   port->active_phys_tcs,
706 			   port->num_pbf_cmd_lines,
707 			   port->num_btb_blocks, port->reserved);
708 	}
709 
710 	/* vport table */
711 	for (i = 0; i < qm_info->num_vports; i++) {
712 		vport = &(qm_info->qm_vport_params[i]);
713 		DP_VERBOSE(p_hwfn,
714 			   NETIF_MSG_HW,
715 			   "vport idx %d, vport_rl %d, wfq %d, first_tx_pq_id [ ",
716 			   qm_info->start_vport + i,
717 			   vport->vport_rl, vport->vport_wfq);
718 		for (tc = 0; tc < NUM_OF_TCS; tc++)
719 			DP_VERBOSE(p_hwfn,
720 				   NETIF_MSG_HW,
721 				   "%d ", vport->first_tx_pq_id[tc]);
722 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
723 	}
724 
725 	/* pq table */
726 	for (i = 0; i < qm_info->num_pqs; i++) {
727 		pq = &(qm_info->qm_pq_params[i]);
728 		DP_VERBOSE(p_hwfn,
729 			   NETIF_MSG_HW,
730 			   "pq idx %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d\n",
731 			   qm_info->start_pq + i,
732 			   pq->vport_id,
733 			   pq->tc_id, pq->wrr_group, pq->rl_valid);
734 	}
735 }
736 
737 static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
738 {
739 	/* reset params required for init run */
740 	qed_init_qm_reset_params(p_hwfn);
741 
742 	/* init QM top level params */
743 	qed_init_qm_params(p_hwfn);
744 
745 	/* init QM port params */
746 	qed_init_qm_port_params(p_hwfn);
747 
748 	/* init QM vport params */
749 	qed_init_qm_vport_params(p_hwfn);
750 
751 	/* init QM physical queue params */
752 	qed_init_qm_pq_params(p_hwfn);
753 
754 	/* display all that init */
755 	qed_dp_init_qm_params(p_hwfn);
756 }
757 
/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through the rbc interface to release the QM
 */
766 int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
767 {
768 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
769 	bool b_rc;
770 	int rc;
771 
772 	/* initialize qed's qm data structure */
773 	qed_init_qm_info(p_hwfn);
774 
775 	/* stop PF's qm queues */
776 	spin_lock_bh(&qm_lock);
777 	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
778 				    qm_info->start_pq, qm_info->num_pqs);
779 	spin_unlock_bh(&qm_lock);
780 	if (!b_rc)
781 		return -EINVAL;
782 
783 	/* clear the QM_PF runtime phase leftovers from previous init */
784 	qed_init_clear_rt_data(p_hwfn);
785 
786 	/* prepare QM portion of runtime array */
787 	qed_qm_init_pf(p_hwfn, p_ptt);
788 
789 	/* activate init tool on runtime array */
790 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
791 			  p_hwfn->hw_info.hw_mode);
792 	if (rc)
793 		return rc;
794 
795 	/* start PF's qm queues */
796 	spin_lock_bh(&qm_lock);
797 	b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
798 				    qm_info->start_pq, qm_info->num_pqs);
799 	spin_unlock_bh(&qm_lock);
800 	if (!b_rc)
801 		return -EINVAL;
802 
803 	return 0;
804 }
805 
806 static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
807 {
808 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
809 	int rc;
810 
811 	rc = qed_init_qm_sanity(p_hwfn);
812 	if (rc)
813 		goto alloc_err;
814 
	qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
					sizeof(*qm_info->qm_pq_params),
					GFP_KERNEL);
	if (!qm_info->qm_pq_params)
		goto alloc_err;

	qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
					   sizeof(*qm_info->qm_vport_params),
					   GFP_KERNEL);
	if (!qm_info->qm_vport_params)
		goto alloc_err;

	qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine,
					  sizeof(*qm_info->qm_port_params),
					  GFP_KERNEL);
	if (!qm_info->qm_port_params)
		goto alloc_err;

	qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
				    sizeof(*qm_info->wfq_data),
				    GFP_KERNEL);
	if (!qm_info->wfq_data)
		goto alloc_err;
838 
839 	return 0;
840 
841 alloc_err:
842 	DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
843 	qed_qm_info_free(p_hwfn);
844 	return -ENOMEM;
845 }
846 
847 int qed_resc_alloc(struct qed_dev *cdev)
848 {
849 	u32 rdma_tasks, excess_tasks;
850 	u32 line_count;
851 	int i, rc = 0;
852 
853 	if (IS_VF(cdev)) {
854 		for_each_hwfn(cdev, i) {
855 			rc = qed_l2_alloc(&cdev->hwfns[i]);
856 			if (rc)
857 				return rc;
858 		}
859 		return rc;
860 	}
861 
862 	cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
863 	if (!cdev->fw_data)
864 		return -ENOMEM;
865 
866 	for_each_hwfn(cdev, i) {
867 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
868 		u32 n_eqes, num_cons;
869 
870 		/* First allocate the context manager structure */
871 		rc = qed_cxt_mngr_alloc(p_hwfn);
872 		if (rc)
873 			goto alloc_err;
874 
		/* Set the HW cid/tid numbers (in the context manager).
876 		 * Must be done prior to any further computations.
877 		 */
878 		rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
879 		if (rc)
880 			goto alloc_err;
881 
882 		rc = qed_alloc_qm_data(p_hwfn);
883 		if (rc)
884 			goto alloc_err;
885 
886 		/* init qm info */
887 		qed_init_qm_info(p_hwfn);
888 
889 		/* Compute the ILT client partition */
890 		rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
891 		if (rc) {
892 			DP_NOTICE(p_hwfn,
				  "too many ILT lines; re-computing with fewer lines\n");
894 			/* In case there are not enough ILT lines we reduce the
895 			 * number of RDMA tasks and re-compute.
896 			 */
897 			excess_tasks =
898 			    qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
899 			if (!excess_tasks)
900 				goto alloc_err;
901 
902 			rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
903 			rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
904 			if (rc)
905 				goto alloc_err;
906 
907 			rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
908 			if (rc) {
909 				DP_ERR(p_hwfn,
910 				       "failed ILT compute. Requested too many lines: %u\n",
911 				       line_count);
912 
913 				goto alloc_err;
914 			}
915 		}
916 
		/* CID map / ILT shadow table / T2
		 * The tables' sizes are determined by the computations above
		 */
920 		rc = qed_cxt_tables_alloc(p_hwfn);
921 		if (rc)
922 			goto alloc_err;
923 
924 		/* SPQ, must follow ILT because initializes SPQ context */
925 		rc = qed_spq_alloc(p_hwfn);
926 		if (rc)
927 			goto alloc_err;
928 
929 		/* SP status block allocation */
930 		p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
931 							 RESERVED_PTT_DPC);
932 
933 		rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
934 		if (rc)
935 			goto alloc_err;
936 
937 		rc = qed_iov_alloc(p_hwfn);
938 		if (rc)
939 			goto alloc_err;
940 
941 		/* EQ */
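		/* Size the EQ to absorb the worst-case burst of async
		 * completions: the full SPQ capacity, plus (for RDMA) up to
		 * two events per connection and per VF, or (for iSCSI) two
		 * events per connection.
		 */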
942 		n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
943 		if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
944 			enum protocol_type rdma_proto;
945 
946 			if (QED_IS_ROCE_PERSONALITY(p_hwfn))
947 				rdma_proto = PROTOCOLID_ROCE;
948 			else
949 				rdma_proto = PROTOCOLID_IWARP;
950 
951 			num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
952 							       rdma_proto,
953 							       NULL) * 2;
954 			n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
955 		} else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
956 			num_cons =
957 			    qed_cxt_get_proto_cid_count(p_hwfn,
958 							PROTOCOLID_ISCSI,
959 							NULL);
960 			n_eqes += 2 * num_cons;
961 		}
962 
963 		if (n_eqes > 0xFFFF) {
964 			DP_ERR(p_hwfn,
965 			       "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
966 			       n_eqes, 0xFFFF);
967 			goto alloc_no_mem;
968 		}
969 
970 		rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
971 		if (rc)
972 			goto alloc_err;
973 
974 		rc = qed_consq_alloc(p_hwfn);
975 		if (rc)
976 			goto alloc_err;
977 
978 		rc = qed_l2_alloc(p_hwfn);
979 		if (rc)
980 			goto alloc_err;
981 
982 #ifdef CONFIG_QED_LL2
983 		if (p_hwfn->using_ll2) {
984 			rc = qed_ll2_alloc(p_hwfn);
985 			if (rc)
986 				goto alloc_err;
987 		}
988 #endif
989 
990 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
991 			rc = qed_fcoe_alloc(p_hwfn);
992 			if (rc)
993 				goto alloc_err;
994 		}
995 
996 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
997 			rc = qed_iscsi_alloc(p_hwfn);
998 			if (rc)
999 				goto alloc_err;
1000 			rc = qed_ooo_alloc(p_hwfn);
1001 			if (rc)
1002 				goto alloc_err;
1003 		}
1004 
1005 		/* DMA info initialization */
1006 		rc = qed_dmae_info_alloc(p_hwfn);
1007 		if (rc)
1008 			goto alloc_err;
1009 
1010 		/* DCBX initialization */
1011 		rc = qed_dcbx_info_alloc(p_hwfn);
1012 		if (rc)
1013 			goto alloc_err;
1014 	}
1015 
1016 	cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
1017 	if (!cdev->reset_stats)
1018 		goto alloc_no_mem;
1019 
1020 	return 0;
1021 
1022 alloc_no_mem:
1023 	rc = -ENOMEM;
1024 alloc_err:
1025 	qed_resc_free(cdev);
1026 	return rc;
1027 }
1028 
1029 void qed_resc_setup(struct qed_dev *cdev)
1030 {
1031 	int i;
1032 
1033 	if (IS_VF(cdev)) {
1034 		for_each_hwfn(cdev, i)
1035 			qed_l2_setup(&cdev->hwfns[i]);
1036 		return;
1037 	}
1038 
1039 	for_each_hwfn(cdev, i) {
1040 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1041 
1042 		qed_cxt_mngr_setup(p_hwfn);
1043 		qed_spq_setup(p_hwfn);
1044 		qed_eq_setup(p_hwfn);
1045 		qed_consq_setup(p_hwfn);
1046 
1047 		/* Read shadow of current MFW mailbox */
1048 		qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
1049 		memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
1050 		       p_hwfn->mcp_info->mfw_mb_cur,
1051 		       p_hwfn->mcp_info->mfw_mb_length);
1052 
1053 		qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
1054 
1055 		qed_l2_setup(p_hwfn);
1056 		qed_iov_setup(p_hwfn);
1057 #ifdef CONFIG_QED_LL2
1058 		if (p_hwfn->using_ll2)
1059 			qed_ll2_setup(p_hwfn);
1060 #endif
1061 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
1062 			qed_fcoe_setup(p_hwfn);
1063 
1064 		if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
1065 			qed_iscsi_setup(p_hwfn);
1066 			qed_ooo_setup(p_hwfn);
1067 		}
1068 	}
1069 }
1070 
1071 #define FINAL_CLEANUP_POLL_CNT          (100)
1072 #define FINAL_CLEANUP_POLL_TIME         (10)
1073 int qed_final_cleanup(struct qed_hwfn *p_hwfn,
1074 		      struct qed_ptt *p_ptt, u16 id, bool is_vf)
1075 {
1076 	u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
1077 	int rc = -EBUSY;
1078 
1079 	addr = GTT_BAR0_MAP_REG_USDM_RAM +
1080 		USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
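	/* Firmware acknowledges the cleanup by writing a non-zero value to
	 * this PF's final-ack slot in USDM RAM; the slot is polled below.
	 */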
1081 
1082 	if (is_vf)
1083 		id += 0x10;
1084 
1085 	command |= X_FINAL_CLEANUP_AGG_INT <<
1086 		SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
1087 	command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
1088 	command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
1089 	command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
1090 
1091 	/* Make sure notification is not set before initiating final cleanup */
1092 	if (REG_RD(p_hwfn, addr)) {
		DP_NOTICE(p_hwfn,
			  "Unexpected; found a final cleanup notification before initiating final cleanup\n");
1095 		REG_WR(p_hwfn, addr, 0);
1096 	}
1097 
	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
		   "Sending final cleanup for PFVF[%d] [Command %08x]\n",
		   id, command);
1101 
1102 	qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
1103 
1104 	/* Poll until completion */
1105 	while (!REG_RD(p_hwfn, addr) && count--)
1106 		msleep(FINAL_CLEANUP_POLL_TIME);
1107 
1108 	if (REG_RD(p_hwfn, addr))
1109 		rc = 0;
1110 	else
1111 		DP_NOTICE(p_hwfn,
1112 			  "Failed to receive FW final cleanup notification\n");
1113 
1114 	/* Cleanup afterwards */
1115 	REG_WR(p_hwfn, addr, 0);
1116 
1117 	return rc;
1118 }
1119 
1120 static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
1121 {
1122 	int hw_mode = 0;
1123 
1124 	if (QED_IS_BB_B0(p_hwfn->cdev)) {
1125 		hw_mode |= 1 << MODE_BB;
1126 	} else if (QED_IS_AH(p_hwfn->cdev)) {
1127 		hw_mode |= 1 << MODE_K2;
1128 	} else {
1129 		DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
1130 			  p_hwfn->cdev->type);
1131 		return -EINVAL;
1132 	}
1133 
1134 	switch (p_hwfn->cdev->num_ports_in_engine) {
1135 	case 1:
1136 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
1137 		break;
1138 	case 2:
1139 		hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
1140 		break;
1141 	case 4:
1142 		hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
1143 		break;
1144 	default:
1145 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
1146 			  p_hwfn->cdev->num_ports_in_engine);
1147 		return -EINVAL;
1148 	}
1149 
1150 	switch (p_hwfn->cdev->mf_mode) {
1151 	case QED_MF_DEFAULT:
1152 	case QED_MF_NPAR:
1153 		hw_mode |= 1 << MODE_MF_SI;
1154 		break;
1155 	case QED_MF_OVLAN:
1156 		hw_mode |= 1 << MODE_MF_SD;
1157 		break;
1158 	default:
1159 		DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
1160 		hw_mode |= 1 << MODE_MF_SI;
1161 	}
1162 
1163 	hw_mode |= 1 << MODE_ASIC;
1164 
1165 	if (p_hwfn->cdev->num_hwfns > 1)
1166 		hw_mode |= 1 << MODE_100G;
1167 
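	/* e.g. a single-function BB adapter with two ports per engine ends up
	 * with MODE_BB | MODE_PORTS_PER_ENG_2 | MODE_MF_SI | MODE_ASIC set.
	 */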
1168 	p_hwfn->hw_info.hw_mode = hw_mode;
1169 
1170 	DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
1171 		   "Configuring function for hw_mode: 0x%08x\n",
1172 		   p_hwfn->hw_info.hw_mode);
1173 
1174 	return 0;
1175 }
1176 
1177 /* Init run time data for all PFs on an engine. */
1178 static void qed_init_cau_rt_data(struct qed_dev *cdev)
1179 {
1180 	u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
1181 	int i, igu_sb_id;
1182 
1183 	for_each_hwfn(cdev, i) {
1184 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1185 		struct qed_igu_info *p_igu_info;
1186 		struct qed_igu_block *p_block;
1187 		struct cau_sb_entry sb_entry;
1188 
1189 		p_igu_info = p_hwfn->hw_info.p_igu_info;
1190 
1191 		for (igu_sb_id = 0;
1192 		     igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
1193 			p_block = &p_igu_info->entry[igu_sb_id];
1194 
1195 			if (!p_block->is_pf)
1196 				continue;
1197 
1198 			qed_init_cau_sb_entry(p_hwfn, &sb_entry,
1199 					      p_block->function_id, 0, 0);
1200 			STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
1201 					 sb_entry);
1202 		}
1203 	}
1204 }
1205 
1206 static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
1207 				     struct qed_ptt *p_ptt)
1208 {
1209 	u32 val, wr_mbs, cache_line_size;
1210 
1211 	val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
1212 	switch (val) {
1213 	case 0:
1214 		wr_mbs = 128;
1215 		break;
1216 	case 1:
1217 		wr_mbs = 256;
1218 		break;
1219 	case 2:
1220 		wr_mbs = 512;
1221 		break;
1222 	default:
1223 		DP_INFO(p_hwfn,
1224 			"Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
1225 			val);
1226 		return;
1227 	}
1228 
1229 	cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
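	/* e.g. a 64-byte OS cache line with wr_mbs >= 64 programs val = 1 */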
1230 	switch (cache_line_size) {
1231 	case 32:
1232 		val = 0;
1233 		break;
1234 	case 64:
1235 		val = 1;
1236 		break;
1237 	case 128:
1238 		val = 2;
1239 		break;
1240 	case 256:
1241 		val = 3;
1242 		break;
1243 	default:
1244 		DP_INFO(p_hwfn,
1245 			"Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
1246 			cache_line_size);
1247 	}
1248 
1249 	if (L1_CACHE_BYTES > wr_mbs)
1250 		DP_INFO(p_hwfn,
1251 			"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
1252 			L1_CACHE_BYTES, wr_mbs);
1253 
1254 	STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
1255 	if (val > 0) {
1256 		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
1257 		STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
1258 	}
1259 }
1260 
1261 static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
1262 			      struct qed_ptt *p_ptt, int hw_mode)
1263 {
1264 	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
1265 	struct qed_qm_common_rt_init_params params;
1266 	struct qed_dev *cdev = p_hwfn->cdev;
1267 	u8 vf_id, max_num_vfs;
1268 	u16 num_pfs, pf_id;
1269 	u32 concrete_fid;
1270 	int rc = 0;
1271 
1272 	qed_init_cau_rt_data(cdev);
1273 
1274 	/* Program GTT windows */
1275 	qed_gtt_init(p_hwfn);
1276 
1277 	if (p_hwfn->mcp_info) {
1278 		if (p_hwfn->mcp_info->func_info.bandwidth_max)
1279 			qm_info->pf_rl_en = 1;
1280 		if (p_hwfn->mcp_info->func_info.bandwidth_min)
1281 			qm_info->pf_wfq_en = 1;
1282 	}
1283 
1284 	memset(&params, 0, sizeof(params));
1285 	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
1286 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
1287 	params.pf_rl_en = qm_info->pf_rl_en;
1288 	params.pf_wfq_en = qm_info->pf_wfq_en;
1289 	params.vport_rl_en = qm_info->vport_rl_en;
1290 	params.vport_wfq_en = qm_info->vport_wfq_en;
1291 	params.port_params = qm_info->qm_port_params;
1292 
1293 	qed_qm_common_rt_init(p_hwfn, &params);
1294 
1295 	qed_cxt_hw_init_common(p_hwfn);
1296 
1297 	qed_init_cache_line_size(p_hwfn, p_ptt);
1298 
1299 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
1300 	if (rc)
1301 		return rc;
1302 
1303 	qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
1304 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
1305 
1306 	if (QED_IS_BB(p_hwfn->cdev)) {
1307 		num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
1308 		for (pf_id = 0; pf_id < num_pfs; pf_id++) {
1309 			qed_fid_pretend(p_hwfn, p_ptt, pf_id);
1310 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
1311 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1312 		}
1313 		/* pretend to original PF */
1314 		qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
1315 	}
1316 
1317 	max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
1318 	for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
1319 		concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
1320 		qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
1321 		qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
1322 		qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
1323 		qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
1324 		qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
1325 	}
1326 	/* pretend to original PF */
1327 	qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
1328 
1329 	return rc;
1330 }
1331 
1332 static int
1333 qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
1334 		     struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
1335 {
1336 	u32 dpi_bit_shift, dpi_count, dpi_page_size;
1337 	u32 min_dpis;
1338 	u32 n_wids;
1339 
1340 	/* Calculate DPI size */
1341 	n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
1342 	dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
1343 	dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
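	/* Example, assuming a 1kB QED_WID_SIZE, QED_MIN_WIDS <= 8 and 4kB
	 * pages: n_cpus = 8 gives dpi_page_size = 8kB and dpi_bit_shift = 1,
	 * so a 512kB PWM region yields dpi_count = 64.
	 */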
1344 	dpi_bit_shift = ilog2(dpi_page_size / 4096);
1345 	dpi_count = pwm_region_size / dpi_page_size;
1346 
1347 	min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
1348 	min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
1349 
1350 	p_hwfn->dpi_size = dpi_page_size;
1351 	p_hwfn->dpi_count = dpi_count;
1352 
1353 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
1354 
1355 	if (dpi_count < min_dpis)
1356 		return -EINVAL;
1357 
1358 	return 0;
1359 }
1360 
1361 enum QED_ROCE_EDPM_MODE {
1362 	QED_ROCE_EDPM_MODE_ENABLE = 0,
1363 	QED_ROCE_EDPM_MODE_FORCE_ON = 1,
1364 	QED_ROCE_EDPM_MODE_DISABLE = 2,
1365 };
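/* ENABLE tries to allocate a WID per CPU and falls back to a single WID
 * (EDPM disabled) on failure; FORCE_ON propagates the failure instead of
 * falling back, failing the init flow; DISABLE never attempts EDPM.
 */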
1366 
1367 static int
1368 qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1369 {
1370 	u32 pwm_regsize, norm_regsize;
1371 	u32 non_pwm_conn, min_addr_reg1;
1372 	u32 db_bar_size, n_cpus = 1;
1373 	u32 roce_edpm_mode;
1374 	u32 pf_dems_shift;
1375 	int rc = 0;
1376 	u8 cond;
1377 
1378 	db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
1379 	if (p_hwfn->cdev->num_hwfns > 1)
1380 		db_bar_size /= 2;
1381 
1382 	/* Calculate doorbell regions */
1383 	non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
1384 		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
1385 						   NULL) +
1386 		       qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
1387 						   NULL);
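	/* The "normal" region holds one QED_PF_DEMS_SIZE doorbell per
	 * CORE/ETH connection; the remainder of the BAR is carved into DPIs
	 * for the PWM doorbells.
	 */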
1388 	norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
1389 	min_addr_reg1 = norm_regsize / 4096;
1390 	pwm_regsize = db_bar_size - norm_regsize;
1391 
1392 	/* Check that the normal and PWM sizes are valid */
1393 	if (db_bar_size < norm_regsize) {
1394 		DP_ERR(p_hwfn->cdev,
1395 		       "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
1396 		       db_bar_size, norm_regsize);
1397 		return -EINVAL;
1398 	}
1399 
1400 	if (pwm_regsize < QED_MIN_PWM_REGION) {
1401 		DP_ERR(p_hwfn->cdev,
1402 		       "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
1403 		       pwm_regsize,
1404 		       QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
1405 		return -EINVAL;
1406 	}
1407 
1408 	/* Calculate number of DPIs */
1409 	roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
	if (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE ||
	    roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON) {
1412 		/* Either EDPM is mandatory, or we are attempting to allocate a
1413 		 * WID per CPU.
1414 		 */
1415 		n_cpus = num_present_cpus();
1416 		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
1417 	}
1418 
1419 	cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
1420 	       (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
1421 	if (cond || p_hwfn->dcbx_no_edpm) {
1422 		/* Either EDPM is disabled from user configuration, or it is
1423 		 * disabled via DCBx, or it is not mandatory and we failed to
1424 		 * allocated a WID per CPU.
1425 		 */
1426 		n_cpus = 1;
1427 		rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
1428 
1429 		if (cond)
1430 			qed_rdma_dpm_bar(p_hwfn, p_ptt);
1431 	}
1432 
1433 	p_hwfn->wid_count = (u16) n_cpus;
1434 
1435 	DP_INFO(p_hwfn,
1436 		"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n",
1437 		norm_regsize,
1438 		pwm_regsize,
1439 		p_hwfn->dpi_size,
1440 		p_hwfn->dpi_count,
1441 		((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ?
1442 		"disabled" : "enabled");
1443 
1444 	if (rc) {
1445 		DP_ERR(p_hwfn,
1446 		       "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
1447 		       p_hwfn->dpi_count,
1448 		       p_hwfn->pf_params.rdma_pf_params.min_dpis);
1449 		return -EINVAL;
1450 	}
1451 
1452 	p_hwfn->dpi_start_offset = norm_regsize;
1453 
1454 	/* DEMS size is configured log2 of DWORDs, hence the division by 4 */
1455 	pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
1456 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
1457 	qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
1458 
1459 	return 0;
1460 }
1461 
1462 static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
1463 			    struct qed_ptt *p_ptt, int hw_mode)
1464 {
1465 	int rc = 0;
1466 
1467 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
1468 	if (rc)
1469 		return rc;
1470 
1471 	qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
1472 
1473 	return 0;
1474 }
1475 
1476 static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
1477 			  struct qed_ptt *p_ptt,
1478 			  struct qed_tunnel_info *p_tunn,
1479 			  int hw_mode,
1480 			  bool b_hw_start,
1481 			  enum qed_int_mode int_mode,
1482 			  bool allow_npar_tx_switch)
1483 {
1484 	u8 rel_pf_id = p_hwfn->rel_pf_id;
1485 	int rc = 0;
1486 
1487 	if (p_hwfn->mcp_info) {
1488 		struct qed_mcp_function_info *p_info;
1489 
1490 		p_info = &p_hwfn->mcp_info->func_info;
1491 		if (p_info->bandwidth_min)
1492 			p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
1493 
1494 		/* Update rate limit once we'll actually have a link */
1495 		p_hwfn->qm_info.pf_rl = 100000;
1496 	}
1497 
1498 	qed_cxt_hw_init_pf(p_hwfn, p_ptt);
1499 
1500 	qed_int_igu_init_rt(p_hwfn);
1501 
1502 	/* Set VLAN in NIG if needed */
1503 	if (hw_mode & BIT(MODE_MF_SD)) {
1504 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
1505 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
1506 		STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
1507 			     p_hwfn->hw_info.ovlan);
1508 	}
1509 
1510 	/* Enable classification by MAC if needed */
1511 	if (hw_mode & BIT(MODE_MF_SI)) {
1512 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
1513 			   "Configuring TAGMAC_CLS_TYPE\n");
1514 		STORE_RT_REG(p_hwfn,
1515 			     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
1516 	}
1517 
	/* Protocol Configuration */
1519 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
1520 		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
1521 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
1522 		     (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
1523 	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
1524 
1525 	/* Cleanup chip from previous driver if such remains exist */
1526 	rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
1527 	if (rc)
1528 		return rc;
1529 
1530 	/* PF Init sequence */
1531 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
1532 	if (rc)
1533 		return rc;
1534 
1535 	/* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
1536 	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
1537 	if (rc)
1538 		return rc;
1539 
1540 	/* Pure runtime initializations - directly to the HW  */
1541 	qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
1542 
1543 	rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
1544 	if (rc)
1545 		return rc;
1546 
1547 	if (b_hw_start) {
1548 		/* enable interrupts */
1549 		qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
1550 
1551 		/* send function start command */
1552 		rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
1553 				     p_hwfn->cdev->mf_mode,
1554 				     allow_npar_tx_switch);
1555 		if (rc) {
1556 			DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
1557 			return rc;
1558 		}
1559 		if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
1560 			qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
1561 			qed_wr(p_hwfn, p_ptt,
1562 			       PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
1563 			       0x100);
1564 		}
1565 	}
1566 	return rc;
1567 }
1568 
1569 static int qed_change_pci_hwfn(struct qed_hwfn *p_hwfn,
1570 			       struct qed_ptt *p_ptt,
1571 			       u8 enable)
1572 {
1573 	u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
1574 
1575 	/* Change PF in PXP */
1576 	qed_wr(p_hwfn, p_ptt,
1577 	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
1578 
1579 	/* wait until value is set - try for 1 second every 50us */
1580 	for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
1581 		val = qed_rd(p_hwfn, p_ptt,
1582 			     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1583 		if (val == set_val)
1584 			break;
1585 
1586 		usleep_range(50, 60);
1587 	}
1588 
1589 	if (val != set_val) {
1590 		DP_NOTICE(p_hwfn,
1591 			  "PFID_ENABLE_MASTER wasn't changed after a second\n");
1592 		return -EAGAIN;
1593 	}
1594 
1595 	return 0;
1596 }
1597 
1598 static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
1599 				struct qed_ptt *p_main_ptt)
1600 {
1601 	/* Read shadow of current MFW mailbox */
1602 	qed_mcp_read_mb(p_hwfn, p_main_ptt);
1603 	memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
1604 	       p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
1605 }
1606 
1607 static void
1608 qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
1609 			 struct qed_drv_load_params *p_drv_load)
1610 {
1611 	memset(p_load_req, 0, sizeof(*p_load_req));
1612 
1613 	p_load_req->drv_role = p_drv_load->is_crash_kernel ?
1614 			       QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
1615 	p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
1616 	p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
1617 	p_load_req->override_force_load = p_drv_load->override_force_load;
1618 }
1619 
1620 static int qed_vf_start(struct qed_hwfn *p_hwfn,
1621 			struct qed_hw_init_params *p_params)
1622 {
1623 	if (p_params->p_tunn) {
1624 		qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
1625 		qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
1626 	}
1627 
1628 	p_hwfn->b_int_enabled = 1;
1629 
1630 	return 0;
1631 }
1632 
1633 int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
1634 {
1635 	struct qed_load_req_params load_req_params;
1636 	u32 load_code, param, drv_mb_param;
1637 	bool b_default_mtu = true;
1638 	struct qed_hwfn *p_hwfn;
1639 	int rc = 0, mfw_rc, i;
1640 
1641 	if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
1642 		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
1643 		return -EINVAL;
1644 	}
1645 
1646 	if (IS_PF(cdev)) {
1647 		rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
1648 		if (rc)
1649 			return rc;
1650 	}
1651 
1652 	for_each_hwfn(cdev, i) {
1653 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1654 
1655 		/* If management didn't provide a default, set one of our own */
1656 		if (!p_hwfn->hw_info.mtu) {
1657 			p_hwfn->hw_info.mtu = 1500;
1658 			b_default_mtu = false;
1659 		}
1660 
1661 		if (IS_VF(cdev)) {
1662 			qed_vf_start(p_hwfn, p_params);
1663 			continue;
1664 		}
1665 
		/* Enable DMAE in PXP */
		rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
		if (rc)
			return rc;
1668 
1669 		rc = qed_calc_hw_mode(p_hwfn);
1670 		if (rc)
1671 			return rc;
1672 
1673 		qed_fill_load_req_params(&load_req_params,
1674 					 p_params->p_drv_load_params);
1675 		rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
1676 				      &load_req_params);
1677 		if (rc) {
1678 			DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
1679 			return rc;
1680 		}
1681 
1682 		load_code = load_req_params.load_code;
1683 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
1684 			   "Load request was sent. Load code: 0x%x\n",
1685 			   load_code);
1686 
1687 		qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
1688 
1689 		qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
1690 
1691 		p_hwfn->first_on_engine = (load_code ==
1692 					   FW_MSG_CODE_DRV_LOAD_ENGINE);
1693 
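		/* The load code is hierarchical: the first function on the
		 * engine runs the engine, port and function phases; the
		 * first function on a port runs the port and function
		 * phases; any other function runs only the function phase.
		 * Hence the fall-through switch below.
		 */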
1694 		switch (load_code) {
1695 		case FW_MSG_CODE_DRV_LOAD_ENGINE:
1696 			rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
1697 						p_hwfn->hw_info.hw_mode);
1698 			if (rc)
1699 				break;
		/* Fall through */
1701 		case FW_MSG_CODE_DRV_LOAD_PORT:
1702 			rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
1703 					      p_hwfn->hw_info.hw_mode);
1704 			if (rc)
1705 				break;
1706 
		/* Fall through */
1708 		case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1709 			rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
1710 					    p_params->p_tunn,
1711 					    p_hwfn->hw_info.hw_mode,
1712 					    p_params->b_hw_start,
1713 					    p_params->int_mode,
1714 					    p_params->allow_npar_tx_switch);
1715 			break;
1716 		default:
1717 			DP_NOTICE(p_hwfn,
1718 				  "Unexpected load code [0x%08x]", load_code);
1719 			rc = -EINVAL;
1720 			break;
1721 		}
1722 
1723 		if (rc)
1724 			DP_NOTICE(p_hwfn,
1725 				  "init phase failed for loadcode 0x%x (rc %d)\n",
1726 				   load_code, rc);
1727 
1728 		/* ACK mfw regardless of success or failure of initialization */
1729 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1730 				     DRV_MSG_CODE_LOAD_DONE,
1731 				     0, &load_code, &param);
1732 		if (rc)
1733 			return rc;
1734 		if (mfw_rc) {
1735 			DP_NOTICE(p_hwfn, "Failed sending LOAD_DONE command\n");
1736 			return mfw_rc;
1737 		}
1738 
1739 		/* Check if there is a DID mismatch between nvm-cfg/efuse */
1740 		if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1741 			DP_NOTICE(p_hwfn,
1742 				  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1743 
1744 		/* send DCBX attention request command */
1745 		DP_VERBOSE(p_hwfn,
1746 			   QED_MSG_DCB,
1747 			   "sending phony dcbx set command to trigger DCBx attention handling\n");
1748 		mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1749 				     DRV_MSG_CODE_SET_DCBX,
1750 				     1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
1751 				     &load_code, &param);
1752 		if (mfw_rc) {
1753 			DP_NOTICE(p_hwfn,
1754 				  "Failed to send DCBX attention request\n");
1755 			return mfw_rc;
1756 		}
1757 
1758 		p_hwfn->hw_init_done = true;
1759 	}
1760 
1761 	if (IS_PF(cdev)) {
1762 		p_hwfn = QED_LEADING_HWFN(cdev);
1763 		drv_mb_param = STORM_FW_VERSION;
1764 		rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1765 				 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
1766 				 drv_mb_param, &load_code, &param);
1767 		if (rc)
1768 			DP_INFO(p_hwfn, "Failed to update firmware version\n");
1769 
1770 		if (!b_default_mtu) {
1771 			rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
1772 						   p_hwfn->hw_info.mtu);
1773 			if (rc)
1774 				DP_INFO(p_hwfn,
1775 					"Failed to update default mtu\n");
1776 		}
1777 
1778 		rc = qed_mcp_ov_update_driver_state(p_hwfn,
1779 						    p_hwfn->p_main_ptt,
1780 						  QED_OV_DRIVER_STATE_DISABLED);
1781 		if (rc)
1782 			DP_INFO(p_hwfn, "Failed to update driver state\n");
1783 
1784 		rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
1785 					       QED_OV_ESWITCH_VEB);
1786 		if (rc)
1787 			DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
1788 	}
1789 
1790 	return 0;
1791 }
1792 
1793 #define QED_HW_STOP_RETRY_LIMIT (10)
1794 static void qed_hw_timers_stop(struct qed_dev *cdev,
1795 			       struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1796 {
1797 	int i;
1798 
1799 	/* close timers */
1800 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
1801 	qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
1802 
1803 	for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
1804 		if ((!qed_rd(p_hwfn, p_ptt,
1805 			     TM_REG_PF_SCAN_ACTIVE_CONN)) &&
1806 		    (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
1807 			break;
1808 
		/* Depending on the number of connections/tasks, a 1ms sleep
		 * between polls may be required.
		 */
		usleep_range(1000, 2000);
	}

	if (i < QED_HW_STOP_RETRY_LIMIT)
		return;

	DP_NOTICE(p_hwfn,
		  "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
		  (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
}

void qed_hw_timers_stop_all(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
	}
}

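/* Full HW/FW teardown of all hw-functions: request UNLOAD from the MFW,
 * stop the PF in FW, close the NIG Rx gate and parser searches, stop the
 * timers, disable attentions and doorbells, and ACK with UNLOAD_DONE.
 * Teardown continues on failure; the first error is reflected in rc2.
 */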
int qed_hw_stop(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int rc, rc2 = 0;
	int j;

	for_each_hwfn(cdev, j) {
		p_hwfn = &cdev->hwfns[j];
		p_ptt = p_hwfn->p_main_ptt;

		DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			rc = qed_vf_pf_reset(p_hwfn);
			if (rc) {
				DP_NOTICE(p_hwfn,
					  "qed_vf_pf_reset failed. rc = %d.\n",
					  rc);
				rc2 = -EINVAL;
			}
			continue;
		}

		/* mark the hw as uninitialized... */
		p_hwfn->hw_init_done = false;

		/* Send unload command to MCP */
		rc = qed_mcp_unload_req(p_hwfn, p_ptt);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed sending a UNLOAD_REQ command. rc = %d.\n",
				  rc);
			rc2 = -EINVAL;
		}

		qed_slowpath_irq_sync(p_hwfn);

		/* After this point no MFW attentions are expected, e.g., this
		 * prevents a race between pf stop and dcbx pf update.
		 */
		rc = qed_sp_pf_stop(p_hwfn);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
				  rc);
			rc2 = -EINVAL;
		}

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_hw_timers_stop(cdev, p_hwfn, p_ptt);

		/* Disable Attention Generation */
		qed_int_igu_disable_int(p_hwfn, p_ptt);

		qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
		qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);

		/* Disable PF in HW blocks */
		qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
		qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);

		rc = qed_mcp_unload_done(p_hwfn, p_ptt);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed sending a UNLOAD_DONE command. rc = %d.\n",
				  rc);
			rc2 = -EINVAL;
		}
	}

	if (IS_PF(cdev)) {
		p_hwfn = QED_LEADING_HWFN(cdev);
		p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;

		/* Disable DMAE in PXP - in CMT, this should only be done for
		 * first hw-function, and only after all transactions have
		 * stopped for all active hw-functions.
		 */
		rc = qed_change_pci_hwfn(p_hwfn, p_ptt, false);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "qed_change_pci_hwfn failed. rc = %d.\n", rc);
			rc2 = -EINVAL;
		}
	}

	return rc2;
}

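/* Close only the fastpath - NIG Rx gate, parser searches and status
 * blocks - without unloading the function from the MFW.
 */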
int qed_hw_stop_fastpath(struct qed_dev *cdev)
{
	int j;

	for_each_hwfn(cdev, j) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
		struct qed_ptt *p_ptt;

		if (IS_VF(cdev)) {
			qed_vf_pf_int_cleanup(p_hwfn);
			continue;
		}
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EAGAIN;

		DP_VERBOSE(p_hwfn,
			   NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);

		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
		qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);

		qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);

		/* Need to wait 1ms to guarantee SBs are cleared */
		usleep_range(1000, 2000);
		qed_ptt_release(p_hwfn, p_ptt);
	}

	return 0;
}

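/* Re-open the fastpath closed by qed_hw_stop_fastpath(), including the
 * searcher if RDMA was initialized.
 */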
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	/* If the roce info is allocated, roce is initialized and should
	 * be enabled in the searcher.
	 */
	if (p_hwfn->p_rdma_info &&
	    p_hwfn->b_rdma_enabled_in_prs)
		qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);

	/* Re-open incoming traffic */
	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
{
	qed_ptt_pool_free(p_hwfn);
	kfree(p_hwfn->hw_info.p_igu_info);
	p_hwfn->hw_info.p_igu_info = NULL;
}

/* Setup bar access */
static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
{
	/* clear indirect access */
	if (QED_IS_AH(p_hwfn->cdev)) {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
	} else {
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
		qed_wr(p_hwfn, p_hwfn->p_main_ptt,
		       PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
	}

	/* Clean Previous errors if such exist */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);

	/* enable internal target-read */
	qed_wr(p_hwfn, p_hwfn->p_main_ptt,
	       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
}

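/* Derive the function's opaque/concrete FIDs, and the absolute/relative
 * PF ids and port id, from the ME register.
 */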
static void get_function_id(struct qed_hwfn *p_hwfn)
{
	/* ME Register */
	p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
						  PXP_PF_ME_OPAQUE_ADDR);

	p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);

	p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
	p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				      PXP_CONCRETE_FID_PFID);
	p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
				    PXP_CONCRETE_FID_PORT);

	DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
		   "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
		   p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
}

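/* Distribute the available status blocks between the features supported
 * by this function's personality - L2 queues, RoCE CNQs, FCoE/iSCSI CQs.
 */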
static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
{
	u32 *feat_num = p_hwfn->hw_info.feat_num;
	struct qed_sb_cnt_info sb_cnt;
	u32 non_l2_sbs = 0;

	memset(&sb_cnt, 0, sizeof(sb_cnt));
	qed_int_get_num_sbs(p_hwfn, &sb_cnt);

	if (IS_ENABLED(CONFIG_QED_RDMA) &&
	    QED_IS_RDMA_PERSONALITY(p_hwfn)) {
		/* Each RoCE CNQ requires one status block and one CNQ. Divide
		 * the status blocks equally between L2 and RoCE, taking into
		 * account how many l2 queues / cnqs we have.
		 */
		feat_num[QED_RDMA_CNQ] =
			min_t(u32, sb_cnt.cnt / 2,
			      RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));

		non_l2_sbs = feat_num[QED_RDMA_CNQ];
	}
	if (QED_IS_L2_PERSONALITY(p_hwfn)) {
		/* Start by allocating VF queues, then PF's */
		feat_num[QED_VF_L2_QUE] = min_t(u32,
						RESC_NUM(p_hwfn, QED_L2_QUEUE),
						sb_cnt.iov_cnt);
		feat_num[QED_PF_L2_QUE] = min_t(u32,
						sb_cnt.cnt - non_l2_sbs,
						RESC_NUM(p_hwfn,
							 QED_L2_QUEUE) -
						FEAT_NUM(p_hwfn,
							 QED_VF_L2_QUE));
	}

	if (QED_IS_FCOE_PERSONALITY(p_hwfn))
		feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt,
					      RESC_NUM(p_hwfn,
						       QED_CMDQS_CQS));

	if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
		feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
					       RESC_NUM(p_hwfn,
							QED_CMDQS_CQS));
	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n",
		   (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
		   (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
		   (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
		   (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
		   (int)sb_cnt.cnt);
}

const char *qed_hw_get_resc_name(enum qed_resources res_id)
{
	switch (res_id) {
	case QED_L2_QUEUE:
		return "L2_QUEUE";
	case QED_VPORT:
		return "VPORT";
	case QED_RSS_ENG:
		return "RSS_ENG";
	case QED_PQ:
		return "PQ";
	case QED_RL:
		return "RL";
	case QED_MAC:
		return "MAC";
	case QED_VLAN:
		return "VLAN";
	case QED_RDMA_CNQ_RAM:
		return "RDMA_CNQ_RAM";
	case QED_ILT:
		return "ILT";
	case QED_LL2_QUEUE:
		return "LL2_QUEUE";
	case QED_CMDQS_CQS:
		return "CMDQS_CQS";
	case QED_RDMA_STATS_QUEUE:
		return "RDMA_STATS_QUEUE";
	case QED_BDQ:
		return "BDQ";
	case QED_SB:
		return "SB";
	default:
		return "UNKNOWN_RESOURCE";
	}
}

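/* Let the MFW know the max value the driver can support for a resource */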
static int
__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_resources res_id,
			    u32 resc_max_val, u32 *p_mcp_resp)
{
	int rc;

	rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
				      resc_max_val, p_mcp_resp);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for a max value setting of resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
		DP_INFO(p_hwfn,
			"Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
			res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);

	return 0;
}

static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u32 resc_max_val, mcp_resp;
	u8 res_id;
	int rc;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		switch (res_id) {
		case QED_LL2_QUEUE:
			resc_max_val = MAX_NUM_LL2_RX_QUEUES;
			break;
		case QED_RDMA_CNQ_RAM:
			/* No need for a case for QED_CMDQS_CQS since
			 * CNQ/CMDQS are the same resource.
			 */
			resc_max_val = NUM_OF_CMDQS_CQS;
			break;
		case QED_RDMA_STATS_QUEUE:
			resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
			    : RDMA_NUM_STATISTIC_COUNTERS_BB;
			break;
		case QED_BDQ:
			resc_max_val = BDQ_NUM_RESOURCES;
			break;
		default:
			continue;
		}

		rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
						 resc_max_val, &mcp_resp);
		if (rc)
			return rc;

		/* There's no point in continuing to the next resource if the
		 * command is not supported by the MFW.
		 * We do continue if the command is supported but the resource
		 * is unknown to the MFW. Such a resource will be later
		 * configured with the default allocation values.
		 */
		if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
			return -EINVAL;
	}

	return 0;
}

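/* Default resource amounts - an even split of the chip's resources
 * between the enabled functions on the engine - used when the MFW can't
 * provide the allocation info.
 */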
static
int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
			 enum qed_resources res_id,
			 u32 *p_resc_num, u32 *p_resc_start)
{
	u8 num_funcs = p_hwfn->num_funcs_on_engine;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);

	switch (res_id) {
	case QED_L2_QUEUE:
		*p_resc_num = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
			       MAX_NUM_L2_QUEUES_BB) / num_funcs;
		break;
	case QED_VPORT:
		*p_resc_num = (b_ah ? MAX_NUM_VPORTS_K2 :
			       MAX_NUM_VPORTS_BB) / num_funcs;
		break;
	case QED_RSS_ENG:
		*p_resc_num = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
			       ETH_RSS_ENGINE_NUM_BB) / num_funcs;
		break;
	case QED_PQ:
		*p_resc_num = (b_ah ? MAX_QM_TX_QUEUES_K2 :
			       MAX_QM_TX_QUEUES_BB) / num_funcs;
		*p_resc_num &= ~0x7;	/* The granularity of the PQs is 8 */
		break;
	case QED_RL:
		*p_resc_num = MAX_QM_GLOBAL_RLS / num_funcs;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		*p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
		break;
	case QED_ILT:
		*p_resc_num = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
			       PXP_NUM_ILT_RECORDS_BB) / num_funcs;
		break;
	case QED_LL2_QUEUE:
		*p_resc_num = MAX_NUM_LL2_RX_QUEUES / num_funcs;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		*p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
		break;
	case QED_RDMA_STATS_QUEUE:
		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
			       RDMA_NUM_STATISTIC_COUNTERS_BB) / num_funcs;
		break;
	case QED_BDQ:
		if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
		    p_hwfn->hw_info.personality != QED_PCI_FCOE)
			*p_resc_num = 0;
		else
			*p_resc_num = 1;
		break;
	case QED_SB:
		/* Since we want its value to reflect whether MFW supports
		 * the new scheme, have a default of 0.
		 */
		*p_resc_num = 0;
		break;
	default:
		return -EINVAL;
	}

	switch (res_id) {
	case QED_BDQ:
		if (!*p_resc_num)
			*p_resc_start = 0;
		else if (p_hwfn->cdev->num_ports_in_engine == 4)
			*p_resc_start = p_hwfn->port_id;
		else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
			*p_resc_start = p_hwfn->port_id;
		else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
			*p_resc_start = p_hwfn->port_id + 2;
		break;
	default:
		*p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
		break;
	}

	return 0;
}

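/* Query the MFW for this function's share of a resource, falling back to
 * the default split when the MFW can't service the request.
 */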
static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
				  enum qed_resources res_id)
{
	u32 dflt_resc_num = 0, dflt_resc_start = 0;
	u32 mcp_resp, *p_resc_num, *p_resc_start;
	int rc;

	p_resc_num = &RESC_NUM(p_hwfn, res_id);
	p_resc_start = &RESC_START(p_hwfn, res_id);

	rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
				  &dflt_resc_start);
	if (rc) {
		DP_ERR(p_hwfn,
		       "Failed to get default amount for resource %d [%s]\n",
		       res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
				   &mcp_resp, p_resc_num, p_resc_start);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "MFW response failure for an allocation request for resource %d [%s]\n",
			  res_id, qed_hw_get_resc_name(res_id));
		return rc;
	}

	/* Default driver values are applied in the following cases:
	 * - The resource allocation MB command is not supported by the MFW
	 * - There is an internal error in the MFW while processing the request
	 * - The resource ID is unknown to the MFW
	 */
	if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		DP_INFO(p_hwfn,
			"Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
			res_id,
			qed_hw_get_resc_name(res_id),
			mcp_resp, dflt_resc_num, dflt_resc_start);
		*p_resc_num = dflt_resc_num;
		*p_resc_start = dflt_resc_start;
	}

	/* PQs have to be aligned to 8 [that's the HW granularity].
	 * Reduce the number so it fits.
	 */
	if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
		DP_INFO(p_hwfn,
			"PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
			*p_resc_num,
			(*p_resc_num) & ~0x7,
			*p_resc_start, (*p_resc_start) & ~0x7);
		*p_resc_num &= ~0x7;
		*p_resc_start &= ~0x7;
	}

	return 0;
}

static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
{
	int rc;
	u8 res_id;

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
		rc = __qed_hw_set_resc_info(p_hwfn, res_id);
		if (rc)
			return rc;
	}

	return 0;
}

static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_resc_unlock_params resc_unlock_params;
	struct qed_resc_lock_params resc_lock_params;
	bool b_ah = QED_IS_AH(p_hwfn->cdev);
	u8 res_id;
	int rc;

	/* Setting the max values of the soft resources and the following
	 * resources allocation queries should be atomic. Since several PFs can
	 * run in parallel - a resource lock is needed.
	 * If either the resource lock or resource set value commands are not
	 * supported - skip the max values setting, release the lock if
	 * needed, and proceed to the queries. Other failures, including a
	 * failure to acquire the lock, will cause this function to fail.
	 */
	qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
				       QED_RESC_LOCK_RESC_ALLOC, false);

	rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
	if (rc && rc != -EINVAL) {
		return rc;
	} else if (rc == -EINVAL) {
		DP_INFO(p_hwfn,
			"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
	} else if (!rc && !resc_lock_params.b_granted) {
		DP_NOTICE(p_hwfn,
			  "Failed to acquire the resource lock for the resource allocation commands\n");
		return -EBUSY;
	} else {
		rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
		if (rc && rc != -EINVAL) {
			DP_NOTICE(p_hwfn,
				  "Failed to set the max values of the soft resources\n");
			goto unlock_and_exit;
		} else if (rc == -EINVAL) {
			DP_INFO(p_hwfn,
				"Skip the max values setting of the soft resources since it is not supported by the MFW\n");
			rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
						 &resc_unlock_params);
			if (rc)
				DP_INFO(p_hwfn,
					"Failed to release the resource lock for the resource allocation commands\n");
		}
	}

	rc = qed_hw_set_resc_info(p_hwfn);
	if (rc)
		goto unlock_and_exit;

	if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
		rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
		if (rc)
			DP_INFO(p_hwfn,
				"Failed to release the resource lock for the resource allocation commands\n");
	}

	/* Sanity for ILT */
	if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
	    (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
		DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
			  RESC_START(p_hwfn, QED_ILT),
			  RESC_END(p_hwfn, QED_ILT) - 1);
		return -EINVAL;
	}

	/* This will also learn the number of SBs from MFW */
	if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
		return -EINVAL;

	qed_hw_set_feat(p_hwfn);

	for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
		DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
			   qed_hw_get_resc_name(res_id),
			   RESC_NUM(p_hwfn, res_id),
			   RESC_START(p_hwfn, res_id));

	return 0;

unlock_and_exit:
	if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
		qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
	return rc;
}

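/* Parse nvm_cfg1 from the MCP scratchpad - port mode, default link and
 * EEE configuration, multi-function mode and device capabilities.
 */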
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities;
	u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
	struct qed_mcp_link_capabilities *p_caps;
	struct qed_mcp_link_params *link;

	/* Read global nvm_cfg address */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);

	/* Verify MCP has initialized it */
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read nvm_cfg1 (notice this is just an offset, not the offsize) */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, core_cfg);

	core_cfg = qed_rd(p_hwfn, p_ptt, addr);

	switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
		NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X10G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
		break;
	case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
		p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X25G;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
		break;
	}

	/* Read default link configuration */
	link = &p_hwfn->mcp_info->link_input;
	p_caps = &p_hwfn->mcp_info->link_capabilities;
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, speed_cap_mask));
	link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
	link->speed.advertised_speeds = link_temp;

	p_caps->speed_capabilities = link->speed.advertised_speeds;

	link_temp = qed_rd(p_hwfn, p_ptt,
			   port_cfg_addr +
			   offsetof(struct nvm_cfg1_port, link_settings));
	switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
		NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
	case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
		link->speed.autoneg = true;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
		link->speed.forced_speed = 1000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
		link->speed.forced_speed = 10000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
		link->speed.forced_speed = 25000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
		link->speed.forced_speed = 40000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
		link->speed.forced_speed = 50000;
		break;
	case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
		link->speed.forced_speed = 100000;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
	}

	p_caps->default_speed_autoneg = link->speed.autoneg;

	link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
	link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
	link->pause.autoneg = !!(link_temp &
				 NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
	link->pause.forced_rx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
	link->pause.forced_tx = !!(link_temp &
				   NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
	link->loopback_mode = 0;

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
		link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
				   offsetof(struct nvm_cfg1_port, ext_phy));
		link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
		link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
		p_caps->default_eee = QED_MCP_EEE_ENABLED;
		link->eee.enable = true;
		switch (link_temp) {
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
			p_caps->default_eee = QED_MCP_EEE_DISABLED;
			link->eee.enable = false;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
			p_caps->eee_lpi_timer =
			    EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
			break;
		case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
			p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
			break;
		}

		link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
		link->eee.tx_lpi_enable = link->eee.enable;
		link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV;
	} else {
		p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_LINK,
		   "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
		   link->speed.forced_speed,
		   link->speed.advertised_speeds,
		   link->speed.autoneg,
		   link->pause.autoneg,
		   p_caps->default_eee, p_caps->eee_lpi_timer);

	/* Read Multi-function information from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
	       offsetof(struct nvm_cfg1, glob) +
	       offsetof(struct nvm_cfg1_glob, generic_cont0);

	generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);

	mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
		  NVM_CFG1_GLOB_MF_MODE_OFFSET;

	switch (mf_mode) {
	case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
		p_hwfn->cdev->mf_mode = QED_MF_OVLAN;
		break;
	case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
		p_hwfn->cdev->mf_mode = QED_MF_NPAR;
		break;
	case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
		p_hwfn->cdev->mf_mode = QED_MF_DEFAULT;
		break;
	}
	DP_INFO(p_hwfn, "Multi function mode is %08x\n",
		p_hwfn->cdev->mf_mode);

	/* Read device capabilities from shmem */
	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		offsetof(struct nvm_cfg1, glob) +
		offsetof(struct nvm_cfg1_glob, device_capabilities);

	device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
		__set_bit(QED_DEV_CAP_ETH,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
		__set_bit(QED_DEV_CAP_FCOE,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
		__set_bit(QED_DEV_CAP_ISCSI,
			  &p_hwfn->hw_info.device_capabilities);
	if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
		__set_bit(QED_DEV_CAP_ROCE,
			  &p_hwfn->hw_info.device_capabilities);

	return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}

static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
	u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
	struct qed_dev *cdev = p_hwfn->cdev;

	num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and is
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		if (QED_IS_BB(cdev)) {
			if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
				num_funcs = 0;
				eng_mask = 0xaaaa;
			} else {
				num_funcs = 1;
				eng_mask = 0x5554;
			}
		} else {
			num_funcs = 1;
			eng_mask = 0xfffe;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}

		/* Get the PF index within the enabled functions */
		low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
		tmp = reg_function_hide & eng_mask & low_pfs_mask;
		while (tmp) {
			if (tmp & 0x1)
				enabled_func_idx--;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;
	p_hwfn->enabled_func_idx = enabled_func_idx;

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id,
		   p_hwfn->abs_pf_id,
		   p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
}

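/* Learn the number of ports on the engine from the BB port-mode register */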
static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 port_mode;

	port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);

	if (port_mode < 3) {
		p_hwfn->cdev->num_ports_in_engine = 1;
	} else if (port_mode <= 5) {
		p_hwfn->cdev->num_ports_in_engine = 2;
	} else {
		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n", port_mode);

		/* Default num_ports_in_engine to something */
		p_hwfn->cdev->num_ports_in_engine = 1;
	}
}

static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt)
{
	u32 port;
	int i;

	p_hwfn->cdev->num_ports_in_engine = 0;

	for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
		port = qed_rd(p_hwfn, p_ptt,
			      CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
		if (port & 1)
			p_hwfn->cdev->num_ports_in_engine++;
	}

	if (!p_hwfn->cdev->num_ports_in_engine) {
		DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");

		/* Default num_ports_in_engine to something */
		p_hwfn->cdev->num_ports_in_engine = 1;
	}
}

static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	if (QED_IS_BB(p_hwfn->cdev))
		qed_hw_info_port_num_bb(p_hwfn, p_ptt);
	else
		qed_hw_info_port_num_ah(p_hwfn, p_ptt);
}

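/* Read the EEE speed capabilities advertised by the port from shmem */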
static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_link_capabilities *p_caps;
	u32 eee_status;

	p_caps = &p_hwfn->mcp_info->link_capabilities;
	if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED)
		return;

	p_caps->eee_speed_caps = 0;
	eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
			EEE_SUPPORTED_SPEED_OFFSET;

	if (eee_status & EEE_1G_SUPPORTED)
		p_caps->eee_speed_caps |= QED_EEE_1G_ADV;
	if (eee_status & EEE_10G_SUPPORTED)
		p_caps->eee_speed_caps |= QED_EEE_10G_ADV;
}

static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		enum qed_pci_personality personality)
{
	int rc;

	/* Since all information is common, only the first hwfn should do this */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_iov_hw_info(p_hwfn);
		if (rc)
			return rc;
	}

	qed_hw_info_port_num(p_hwfn, p_ptt);

	qed_mcp_get_capabilities(p_hwfn, p_ptt);

	rc = qed_hw_get_nvm_info(p_hwfn, p_ptt);
	if (rc)
		return rc;

	rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
	if (rc)
		return rc;

	if (qed_mcp_is_init(p_hwfn))
		ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
				p_hwfn->mcp_info->func_info.mac);
	else
		eth_random_addr(p_hwfn->hw_info.hw_mac_addr);

	if (qed_mcp_is_init(p_hwfn)) {
		if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
			p_hwfn->hw_info.ovlan =
				p_hwfn->mcp_info->func_info.ovlan;

		qed_mcp_cmd_port_init(p_hwfn, p_ptt);

		qed_get_eee_caps(p_hwfn, p_ptt);
	}

	if (qed_mcp_is_init(p_hwfn)) {
		enum qed_pci_personality protocol;

		protocol = p_hwfn->mcp_info->func_info.protocol;
		p_hwfn->hw_info.personality = protocol;
	}

	p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
	p_hwfn->hw_info.num_active_tc = 1;

	qed_get_num_funcs(p_hwfn, p_ptt);

	if (qed_mcp_is_init(p_hwfn))
		p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;

	return qed_hw_get_resc(p_hwfn, p_ptt);
}

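/* Learn device-wide information - device type, chip number/revision and
 * whether the device operates in CMT (two hw-functions) mode.
 */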
static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u16 device_id_mask;
	u32 tmp;

	/* Read Vendor Id / Device Id */
	pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
	pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);

	/* Determine type */
	device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
	switch (device_id_mask) {
	case QED_DEV_ID_MASK_BB:
		cdev->type = QED_DEV_TYPE_BB;
		break;
	case QED_DEV_ID_MASK_AH:
		cdev->type = QED_DEV_TYPE_AH;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
		return -EBUSY;
	}

	cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
	cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);

	MASK_FIELD(CHIP_REV, cdev->chip_rev);

	/* Learn number of HW-functions */
	tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);

	if (tmp & (1 << p_hwfn->rel_pf_id)) {
		DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
		cdev->num_hwfns = 2;
	} else {
		cdev->num_hwfns = 1;
	}

	cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
				    MISCS_REG_CHIP_TEST_REG) >> 4;
	MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
	cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
	MASK_FIELD(CHIP_METAL, cdev->chip_metal);

	DP_INFO(cdev->hwfns,
		"Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
		QED_IS_BB(cdev) ? "BB" : "AH",
		'A' + cdev->chip_rev,
		(int)cdev->chip_metal,
		cdev->chip_num, cdev->chip_rev,
		cdev->chip_bond_id, cdev->chip_metal);

	return 0;
}

static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
				 void __iomem *p_regview,
				 void __iomem *p_doorbells,
				 enum qed_pci_personality personality)
{
	int rc = 0;

	/* Split PCI bars evenly between hwfns */
	p_hwfn->regview = p_regview;
	p_hwfn->doorbells = p_doorbells;

	if (IS_VF(p_hwfn->cdev))
		return qed_vf_hw_prepare(p_hwfn);

	/* Validate that chip access is feasible */
	if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
		DP_ERR(p_hwfn,
		       "Reading the ME register returns all Fs; Preventing further chip access\n");
		return -EINVAL;
	}

	get_function_id(p_hwfn);

	/* Allocate PTT pool */
	rc = qed_ptt_pool_alloc(p_hwfn);
	if (rc)
		goto err0;

	/* Allocate the main PTT */
	p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);

	/* First hwfn learns basic information, e.g., number of hwfns */
	if (!p_hwfn->my_id) {
		rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			goto err1;
	}

	qed_hw_hwfn_prepare(p_hwfn);

	/* Initialize MCP structure */
	rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
		goto err1;
	}

	/* Read the device configuration information from the HW and SHMEM */
	rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to get HW information\n");
		goto err2;
	}

	/* Sending a mailbox to the MFW should be done after qed_get_hw_info()
	 * is called as it sets the ports number in an engine.
	 */
	if (IS_LEAD_HWFN(p_hwfn)) {
		rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
		if (rc)
			DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
	}

	/* Allocate the init RT array and initialize the init-ops engine */
	rc = qed_init_alloc(p_hwfn);
	if (rc)
		goto err2;

	return rc;
err2:
	if (IS_LEAD_HWFN(p_hwfn))
		qed_iov_free_hw_info(p_hwfn->cdev);
	qed_mcp_free(p_hwfn);
err1:
	qed_hw_hwfn_free(p_hwfn);
err0:
	return rc;
}

int qed_hw_prepare(struct qed_dev *cdev,
		   int personality)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int rc;

	/* Store the precompiled init data ptrs */
	if (IS_PF(cdev))
		qed_init_iro_array(cdev);

	/* Initialize the first hwfn - will learn number of hwfns */
	rc = qed_hw_prepare_single(p_hwfn,
				   cdev->regview,
				   cdev->doorbells, personality);
	if (rc)
		return rc;

	personality = p_hwfn->hw_info.personality;

	/* Initialize the rest of the hwfns */
	if (cdev->num_hwfns > 1) {
		void __iomem *p_regview, *p_doorbell;
		u8 __iomem *addr;

		/* adjust bar offset for second engine */
		addr = cdev->regview +
		       qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
				       BAR_ID_0) / 2;
		p_regview = addr;

		addr = cdev->doorbells +
		       qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
				       BAR_ID_1) / 2;
		p_doorbell = addr;

		/* prepare second hw function */
		rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
					   p_doorbell, personality);

		/* In case of error, need to free the previously
		 * initialized hwfn 0.
		 */
		if (rc) {
			if (IS_PF(cdev)) {
				qed_init_free(p_hwfn);
				qed_mcp_free(p_hwfn);
				qed_hw_hwfn_free(p_hwfn);
			}
		}
	}

	return rc;
}

void qed_hw_remove(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	int i;

	if (IS_PF(cdev))
		qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
					       QED_OV_DRIVER_STATE_NOT_LOADED);

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (IS_VF(cdev)) {
			qed_vf_pf_release(p_hwfn);
			continue;
		}

		qed_init_free(p_hwfn);
		qed_hw_hwfn_free(p_hwfn);
		qed_mcp_free(p_hwfn);
	}

	qed_iov_free_hw_info(cdev);
}

static void qed_chain_free_next_ptr(struct qed_dev *cdev,
				    struct qed_chain *p_chain)
{
	void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
	dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
	struct qed_chain_next *p_next;
	u32 size, i;

	if (!p_virt)
		return;

	size = p_chain->elem_size * p_chain->usable_per_page;

	for (i = 0; i < p_chain->page_cnt; i++) {
		if (!p_virt)
			break;

		p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
		p_virt_next = p_next->next_virt;
		p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE, p_virt, p_phys);

		p_virt = p_virt_next;
		p_phys = p_phys_next;
	}
}

static void qed_chain_free_single(struct qed_dev *cdev,
				  struct qed_chain *p_chain)
{
	if (!p_chain->p_virt_addr)
		return;

	dma_free_coherent(&cdev->pdev->dev,
			  QED_CHAIN_PAGE_SIZE,
			  p_chain->p_virt_addr, p_chain->p_phys_addr);
}

static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
	u32 page_cnt = p_chain->page_cnt, i, pbl_size;
	u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;

	if (!pp_virt_addr_tbl)
		return;

	if (!p_pbl_virt)
		goto out;

	for (i = 0; i < page_cnt; i++) {
		if (!pp_virt_addr_tbl[i])
			break;

		dma_free_coherent(&cdev->pdev->dev,
				  QED_CHAIN_PAGE_SIZE,
				  pp_virt_addr_tbl[i],
				  *(dma_addr_t *)p_pbl_virt);

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;

	if (!p_chain->b_external_pbl)
		dma_free_coherent(&cdev->pdev->dev,
				  pbl_size,
				  p_chain->pbl_sp.p_virt_table,
				  p_chain->pbl_sp.p_phys_table);
out:
	vfree(p_chain->pbl.pp_virt_addr_tbl);
	p_chain->pbl.pp_virt_addr_tbl = NULL;
}

void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		qed_chain_free_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		qed_chain_free_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		qed_chain_free_pbl(cdev, p_chain);
		break;
	}
}

static int
qed_chain_alloc_sanity_check(struct qed_dev *cdev,
			     enum qed_chain_cnt_type cnt_type,
			     size_t elem_size, u32 page_cnt)
{
	u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;

	/* The actual chain size can be larger than the maximal possible value
	 * after rounding up the requested elements number to pages, and after
	 * taking into account the unusable elements (next-ptr elements).
	 * The size of a "u16" chain can be (U16_MAX + 1) since the chain
	 * size/capacity fields are of a u32 type.
	 */
	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
	     chain_size > ((u32)U16_MAX + 1)) ||
	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
		DP_NOTICE(cdev,
			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
			  chain_size);
		return -EINVAL;
	}

	return 0;
}

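/* Allocate a next-ptr chain - one DMA-coherent page per link, where the
 * last element of each page points to the following page.
 */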
static int
qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	void *p_virt = NULL, *p_virt_prev = NULL;
	dma_addr_t p_phys = 0;
	u32 i;

	for (i = 0; i < p_chain->page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		} else {
			qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
						     p_virt, p_phys);
		}

		p_virt_prev = p_virt;
	}
	/* Last page's next element should point to the beginning of the
	 * chain.
	 */
	qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
				     p_chain->p_virt_addr,
				     p_chain->p_phys_addr);

	return 0;
}

static int
qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
{
	dma_addr_t p_phys = 0;
	void *p_virt = NULL;

	p_virt = dma_alloc_coherent(&cdev->pdev->dev,
				    QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
	if (!p_virt)
		return -ENOMEM;

	qed_chain_init_mem(p_chain, p_virt, p_phys);
	qed_chain_reset(p_chain);

	return 0;
}

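/* Allocate a PBL-based chain - the data pages are tracked both by a
 * physical PBL table for the HW and by a virtual address table for the
 * driver.
 */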
static int
qed_chain_alloc_pbl(struct qed_dev *cdev,
		    struct qed_chain *p_chain,
		    struct qed_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt = p_chain->page_cnt, size, i;
	dma_addr_t p_phys = 0, p_pbl_phys = 0;
	void **pp_virt_addr_tbl = NULL;
	u8 *p_pbl_virt = NULL;
	void *p_virt = NULL;

	size = page_cnt * sizeof(*pp_virt_addr_tbl);
	pp_virt_addr_tbl = vzalloc(size);
	if (!pp_virt_addr_tbl)
		return -ENOMEM;

	/* The allocation of the PBL table is done with its full size, since it
	 * is expected to be contiguous.
	 * qed_chain_init_pbl_mem() is called even in a case of an allocation
	 * failure, since pp_virt_addr_tbl was previously allocated, and it
	 * should be saved to allow its freeing during the error flow.
	 */
	size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;

	if (!ext_pbl) {
		p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
						size, &p_pbl_phys, GFP_KERNEL);
	} else {
		p_pbl_virt = ext_pbl->p_pbl_virt;
		p_pbl_phys = ext_pbl->p_pbl_phys;
		p_chain->b_external_pbl = true;
	}

	qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
			       pp_virt_addr_tbl);
	if (!p_pbl_virt)
		return -ENOMEM;

	for (i = 0; i < page_cnt; i++) {
		p_virt = dma_alloc_coherent(&cdev->pdev->dev,
					    QED_CHAIN_PAGE_SIZE,
					    &p_phys, GFP_KERNEL);
		if (!p_virt)
			return -ENOMEM;

		if (i == 0) {
			qed_chain_init_mem(p_chain, p_virt, p_phys);
			qed_chain_reset(p_chain);
		}

		/* Fill the PBL table with the physical address of the page */
		*(dma_addr_t *)p_pbl_virt = p_phys;
		/* Keep the virtual address of the page */
		p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;

		p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
	}

	return 0;
}

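/* Allocate a chain of 'num_elems' elements in the requested mode. As a
 * usage sketch, an Rx ring would typically be allocated with
 * mode == QED_CHAIN_MODE_PBL and ext_pbl == NULL; on any allocation
 * failure the partially built chain is released via qed_chain_free().
 */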
int qed_chain_alloc(struct qed_dev *cdev,
		    enum qed_chain_use_mode intended_use,
		    enum qed_chain_mode mode,
		    enum qed_chain_cnt_type cnt_type,
		    u32 num_elems,
		    size_t elem_size,
		    struct qed_chain *p_chain,
		    struct qed_chain_ext_pbl *ext_pbl)
{
	u32 page_cnt;
	int rc = 0;

	if (mode == QED_CHAIN_MODE_SINGLE)
		page_cnt = 1;
	else
		page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);

	rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
	if (rc) {
		DP_NOTICE(cdev,
			  "Cannot allocate a chain with the given arguments:\n");
		DP_NOTICE(cdev,
			  "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
			  intended_use, mode, cnt_type, num_elems, elem_size);
		return rc;
	}

	qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
			      mode, cnt_type);

	switch (mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		rc = qed_chain_alloc_next_ptr(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_SINGLE:
		rc = qed_chain_alloc_single(cdev, p_chain);
		break;
	case QED_CHAIN_MODE_PBL:
		rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
		break;
	}
	if (rc)
		goto nomem;

	return 0;

nomem:
	qed_chain_free(cdev, p_chain);
	return rc;
}

int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
		u16 min, max;

		min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
		max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE) - 1;
		DP_NOTICE(p_hwfn,
			  "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;

	return 0;
}

int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_VPORT);
		max = min + RESC_NUM(p_hwfn, QED_VPORT) - 1;
		DP_NOTICE(p_hwfn,
			  "vport id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;

	return 0;
}

int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
{
	if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
		u8 min, max;

		min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
		max = min + RESC_NUM(p_hwfn, QED_RSS_ENG) - 1;
		DP_NOTICE(p_hwfn,
			  "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
			  src_id, min, max);

		return -EINVAL;
	}

	*dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;

	return 0;
}

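/* Pack a MAC address into the high/low register words of an LLH filter */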
static void qed_llh_mac_to_filter(u32 *p_high, u32 *p_low,
				  u8 *p_filter)
{
	*p_high = p_filter[1] | (p_filter[0] << 8);
	*p_low = p_filter[5] | (p_filter[4] << 8) |
		 (p_filter[3] << 16) | (p_filter[2] << 24);
}

int qed_llh_add_mac_filter(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return 0;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find a free entry and utilize it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		en = qed_rd(p_hwfn, p_ptt,
			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
		if (en)
			continue;
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       2 * i * sizeof(u32), low);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), high);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
		       i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an empty LLH filter to utilize\n");
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
		   "mac: %pM is added at %d\n",
		   p_filter, i);

	return 0;
}

void qed_llh_remove_mac_filter(struct qed_hwfn *p_hwfn,
			       struct qed_ptt *p_ptt, u8 *p_filter)
{
	u32 high = 0, low = 0;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return;

	qed_llh_mac_to_filter(&high, &low, p_filter);

	/* Find the entry and clean it */
	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   2 * i * sizeof(u32)) != low)
			continue;
		if (qed_rd(p_hwfn, p_ptt,
			   NIG_REG_LLH_FUNC_FILTER_VALUE +
			   (2 * i + 1) * sizeof(u32)) != high)
			continue;

		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
		qed_wr(p_hwfn, p_ptt,
		       NIG_REG_LLH_FUNC_FILTER_VALUE +
		       (2 * i + 1) * sizeof(u32), 0);

		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
			   "mac: %pM is removed from %d\n",
			   p_filter, i);
		break;
	}
	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
}

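/* Configure an ethertype- or TCP/UDP-port-based NIG LLH protocol filter
 * in the first free filter entry.
 */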
int
qed_llh_add_protocol_filter(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    u16 source_port_or_eth_type,
			    u16 dest_port, enum qed_llh_port_filter_type_t type)
{
	u32 high = 0, low = 0, en;
	int i;

	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
		return 0;

	switch (type) {
	case QED_LLH_FILTER_ETHERTYPE:
		high = source_port_or_eth_type;
		break;
	case QED_LLH_FILTER_TCP_SRC_PORT:
	case QED_LLH_FILTER_UDP_SRC_PORT:
		low = source_port_or_eth_type << 16;
		break;
	case QED_LLH_FILTER_TCP_DEST_PORT:
	case QED_LLH_FILTER_UDP_DEST_PORT:
		low = dest_port;
		break;
	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
		low = (source_port_or_eth_type << 16) | dest_port;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Invalid LLH protocol filter type %d\n", type);
3545 		return -EINVAL;
3546 	}
3547 	/* Find a free entry and utilize it */
3548 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
3549 		en = qed_rd(p_hwfn, p_ptt,
3550 			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
3551 		if (en)
3552 			continue;
3553 		qed_wr(p_hwfn, p_ptt,
3554 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
3555 		       2 * i * sizeof(u32), low);
3556 		qed_wr(p_hwfn, p_ptt,
3557 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
3558 		       (2 * i + 1) * sizeof(u32), high);
3559 		qed_wr(p_hwfn, p_ptt,
3560 		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
3561 		qed_wr(p_hwfn, p_ptt,
3562 		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
3563 		       i * sizeof(u32), 1 << type);
3564 		qed_wr(p_hwfn, p_ptt,
3565 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
3566 		break;
3567 	}
3568 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
3569 		DP_NOTICE(p_hwfn,
3570 			  "Failed to find an empty LLH filter to utilize\n");
3571 		return -EINVAL;
3572 	}
3573 	switch (type) {
3574 	case QED_LLH_FILTER_ETHERTYPE:
3575 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
3576 			   "ETH type %x is added at %d\n",
3577 			   source_port_or_eth_type, i);
3578 		break;
3579 	case QED_LLH_FILTER_TCP_SRC_PORT:
3580 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
3581 			   "TCP src port %x is added at %d\n",
3582 			   source_port_or_eth_type, i);
3583 		break;
3584 	case QED_LLH_FILTER_UDP_SRC_PORT:
3585 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
3586 			   "UDP src port %x is added at %d\n",
3587 			   source_port_or_eth_type, i);
3588 		break;
3589 	case QED_LLH_FILTER_TCP_DEST_PORT:
3590 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
3591 			   "TCP dst port %x is added at %d\n", dest_port, i);
3592 		break;
3593 	case QED_LLH_FILTER_UDP_DEST_PORT:
3594 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
3595 			   "UDP dst port %x is added at %d\n", dest_port, i);
3596 		break;
3597 	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
3598 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
3599 			   "TCP src/dst ports %x/%x are added at %d\n",
3600 			   source_port_or_eth_type, dest_port, i);
3601 		break;
3602 	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
3603 		DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
3604 			   "UDP src/dst ports %x/%x are added at %d\n",
3605 			   source_port_or_eth_type, dest_port, i);
3606 		break;
3607 	}
3608 	return 0;
3609 }
3610 
3611 void
3612 qed_llh_remove_protocol_filter(struct qed_hwfn *p_hwfn,
3613 			       struct qed_ptt *p_ptt,
3614 			       u16 source_port_or_eth_type,
3615 			       u16 dest_port,
3616 			       enum qed_llh_port_filter_type_t type)
3617 {
3618 	u32 high = 0, low = 0;
3619 	int i;
3620 
3621 	if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
3622 		return;
3623 
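	/* Rebuild the same VALUE encoding that
	 * qed_llh_add_protocol_filter() programmed, so the entry can be
	 * matched below.
	 */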
3624 	switch (type) {
3625 	case QED_LLH_FILTER_ETHERTYPE:
3626 		high = source_port_or_eth_type;
3627 		break;
3628 	case QED_LLH_FILTER_TCP_SRC_PORT:
3629 	case QED_LLH_FILTER_UDP_SRC_PORT:
3630 		low = source_port_or_eth_type << 16;
3631 		break;
3632 	case QED_LLH_FILTER_TCP_DEST_PORT:
3633 	case QED_LLH_FILTER_UDP_DEST_PORT:
3634 		low = dest_port;
3635 		break;
3636 	case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
3637 	case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
3638 		low = (source_port_or_eth_type << 16) | dest_port;
3639 		break;
3640 	default:
3641 		DP_NOTICE(p_hwfn,
3642 			  "Non valid LLH protocol filter type %d\n", type);
3643 		return;
3644 	}
3645 
3646 	for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
3647 		if (!qed_rd(p_hwfn, p_ptt,
3648 			    NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)))
3649 			continue;
3650 		if (!qed_rd(p_hwfn, p_ptt,
3651 			    NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32)))
3652 			continue;
3653 		if (!(qed_rd(p_hwfn, p_ptt,
3654 			     NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
3655 			     i * sizeof(u32)) & BIT(type)))
3656 			continue;
3657 		if (qed_rd(p_hwfn, p_ptt,
3658 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
3659 			   2 * i * sizeof(u32)) != low)
3660 			continue;
3661 		if (qed_rd(p_hwfn, p_ptt,
3662 			   NIG_REG_LLH_FUNC_FILTER_VALUE +
3663 			   (2 * i + 1) * sizeof(u32)) != high)
3664 			continue;
3665 
3666 		qed_wr(p_hwfn, p_ptt,
3667 		       NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
3668 		qed_wr(p_hwfn, p_ptt,
3669 		       NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
3670 		qed_wr(p_hwfn, p_ptt,
3671 		       NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
3672 		       i * sizeof(u32), 0);
3673 		qed_wr(p_hwfn, p_ptt,
3674 		       NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * i * sizeof(u32), 0);
3675 		qed_wr(p_hwfn, p_ptt,
3676 		       NIG_REG_LLH_FUNC_FILTER_VALUE +
3677 		       (2 * i + 1) * sizeof(u32), 0);
3678 		break;
3679 	}
3680 
3681 	if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
3682 		DP_NOTICE(p_hwfn, "Tried to remove a non-configured filter\n");
3683 }
3684 
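/* Program a queue zone's coalescing timeset. The caller supplies a
 * scratch queue-zone buffer of the right storm-specific size; only the
 * timeset field and the valid bit are written to hardware here.
 */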
3685 static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3686 			    u32 hw_addr, void *p_eth_qzone,
3687 			    size_t eth_qzone_size, u8 timeset)
3688 {
3689 	struct coalescing_timeset *p_coal_timeset;
3690 
3691 	if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
3692 		DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
3693 		return -EINVAL;
3694 	}
3695 
3696 	p_coal_timeset = p_eth_qzone;
3697 	memset(p_eth_qzone, 0, eth_qzone_size);
3698 	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
3699 	SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
3700 	qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
3701 
3702 	return 0;
3703 }
3704 
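/* Set Rx/Tx interrupt coalescing for a queue. A zero value leaves the
 * corresponding direction unchanged; for VFs the request is forwarded
 * to the PF over the VF-PF channel.
 */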
3705 int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle)
3706 {
3707 	struct qed_queue_cid *p_cid = p_handle;
3708 	struct qed_hwfn *p_hwfn;
3709 	struct qed_ptt *p_ptt;
3710 	int rc = 0;
3711 
3712 	p_hwfn = p_cid->p_owner;
3713 
3714 	if (IS_VF(p_hwfn->cdev))
3715 		return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid);
3716 
3717 	p_ptt = qed_ptt_acquire(p_hwfn);
3718 	if (!p_ptt)
3719 		return -EAGAIN;
3720 
3721 	if (rx_coal) {
3722 		rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3723 		if (rc)
3724 			goto out;
3725 		p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
3726 	}
3727 
3728 	if (tx_coal) {
3729 		rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
3730 		if (rc)
3731 			goto out;
3732 		p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
3733 	}
3734 out:
3735 	qed_ptt_release(p_hwfn, p_ptt);
3736 	return rc;
3737 }
3738 
3739 int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
3740 			 struct qed_ptt *p_ptt,
3741 			 u16 coalesce, struct qed_queue_cid *p_cid)
3742 {
3743 	struct ustorm_eth_queue_zone eth_qzone;
3744 	u8 timeset, timer_res;
3745 	u32 address;
3746 	int rc;
3747 
	/* Coalesce = (timeset << timer-resolution); timeset is 7 bits wide */
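	/* e.g. coalesce = 200: 0x7F < 200 <= 0xFF, so timer_res = 1 and
	 * timeset = 200 >> 1 = 100, which the hardware expands back to
	 * 100 << 1 = 200.
	 */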
3749 	if (coalesce <= 0x7F) {
3750 		timer_res = 0;
3751 	} else if (coalesce <= 0xFF) {
3752 		timer_res = 1;
3753 	} else if (coalesce <= 0x1FF) {
3754 		timer_res = 2;
3755 	} else {
3756 		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
3757 		return -EINVAL;
3758 	}
3759 	timeset = (u8)(coalesce >> timer_res);
3760 
3761 	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
3762 				   p_cid->sb_igu_id, false);
3763 	if (rc)
3764 		goto out;
3765 
3766 	address = BAR0_MAP_REG_USDM_RAM +
3767 		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
3768 
3769 	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
3770 			      sizeof(struct ustorm_eth_queue_zone), timeset);

out:
3775 	return rc;
3776 }
3777 
3778 int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
3779 			 struct qed_ptt *p_ptt,
3780 			 u16 coalesce, struct qed_queue_cid *p_cid)
3781 {
3782 	struct xstorm_eth_queue_zone eth_qzone;
3783 	u8 timeset, timer_res;
3784 	u32 address;
3785 	int rc;
3786 
	/* Coalesce = (timeset << timer-resolution); timeset is 7 bits wide */
3788 	if (coalesce <= 0x7F) {
3789 		timer_res = 0;
3790 	} else if (coalesce <= 0xFF) {
3791 		timer_res = 1;
3792 	} else if (coalesce <= 0x1FF) {
3793 		timer_res = 2;
3794 	} else {
3795 		DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
3796 		return -EINVAL;
3797 	}
3798 	timeset = (u8)(coalesce >> timer_res);
3799 
3800 	rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
3801 				   p_cid->sb_igu_id, true);
3802 	if (rc)
3803 		goto out;
3804 
3805 	address = BAR0_MAP_REG_XSDM_RAM +
3806 		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
3807 
3808 	rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
3809 			      sizeof(struct xstorm_eth_queue_zone), timeset);
3810 out:
3811 	return rc;
3812 }
3813 
3814 /* Calculate final WFQ values for all vports and configure them.
3815  * After this configuration each vport will have
 * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
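 * e.g. assuming QED_WFQ_UNIT is 100: with min_pf_rate = 10000 Mbps, a
 * vport whose min_speed is 2500 Mbps is assigned vport_wfq = 25.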
3817  */
3818 static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
3819 					     struct qed_ptt *p_ptt,
3820 					     u32 min_pf_rate)
3821 {
3822 	struct init_qm_vport_params *vport_params;
3823 	int i;
3824 
3825 	vport_params = p_hwfn->qm_info.qm_vport_params;
3826 
3827 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
3828 		u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
3829 
3830 		vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
3831 						min_pf_rate;
3832 		qed_init_vport_wfq(p_hwfn, p_ptt,
3833 				   vport_params[i].first_tx_pq_id,
3834 				   vport_params[i].vport_wfq);
3835 	}
3836 }
3837 
3838 static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
				       u32 min_pf_rate)
{
3842 	int i;
3843 
3844 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
3845 		p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
3846 }
3847 
3848 static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
3849 					   struct qed_ptt *p_ptt,
3850 					   u32 min_pf_rate)
3851 {
3852 	struct init_qm_vport_params *vport_params;
3853 	int i;
3854 
	vport_params = p_hwfn->qm_info.qm_vport_params;

	/* Setting the default weight once covers all vports */
	qed_init_wfq_default_param(p_hwfn, min_pf_rate);

	for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
		qed_init_vport_wfq(p_hwfn, p_ptt,
				   vport_params[i].first_tx_pq_id,
				   vport_params[i].vport_wfq);
3863 }
3864 
3865 /* This function performs several validations for WFQ
 * configuration and required min rate for a given vport:
 * 1. req_rate must be at least one percent of min_pf_rate.
3868  * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
3869  *    rates to get less than one percent of min_pf_rate.
3870  * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
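 * e.g. with min_pf_rate = 10000 Mbps, every vport (whether configured
 * explicitly or not) must end up with at least 100 Mbps.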
3871  */
3872 static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
3873 			      u16 vport_id, u32 req_rate, u32 min_pf_rate)
3874 {
3875 	u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
3876 	int non_requested_count = 0, req_count = 0, i, num_vports;
3877 
3878 	num_vports = p_hwfn->qm_info.num_vports;
3879 
3880 	/* Accounting for the vports which are configured for WFQ explicitly */
3881 	for (i = 0; i < num_vports; i++) {
3882 		u32 tmp_speed;
3883 
3884 		if ((i != vport_id) &&
3885 		    p_hwfn->qm_info.wfq_data[i].configured) {
3886 			req_count++;
3887 			tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
3888 			total_req_min_rate += tmp_speed;
3889 		}
3890 	}
3891 
3892 	/* Include current vport data as well */
3893 	req_count++;
3894 	total_req_min_rate += req_rate;
3895 	non_requested_count = num_vports - req_count;
3896 
3897 	if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
3898 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3899 			   "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
3900 			   vport_id, req_rate, min_pf_rate);
3901 		return -EINVAL;
3902 	}
3903 
3904 	if (num_vports > QED_WFQ_UNIT) {
3905 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3906 			   "Number of vports is greater than %d\n",
3907 			   QED_WFQ_UNIT);
3908 		return -EINVAL;
3909 	}
3910 
3911 	if (total_req_min_rate > min_pf_rate) {
3912 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
3913 			   "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
3914 			   total_req_min_rate, min_pf_rate);
3915 		return -EINVAL;
3916 	}
3917 
	total_left_rate = min_pf_rate - total_req_min_rate;

	/* Avoid dividing by zero when all vports are explicitly configured */
	if (non_requested_count) {
		left_rate_per_vp = total_left_rate / non_requested_count;
		if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
				   left_rate_per_vp, min_pf_rate);
			return -EINVAL;
		}
	}
3927 
3928 	p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
3929 	p_hwfn->qm_info.wfq_data[vport_id].configured = true;
3930 
3931 	for (i = 0; i < num_vports; i++) {
3932 		if (p_hwfn->qm_info.wfq_data[i].configured)
3933 			continue;
3934 
3935 		p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
3936 	}
3937 
3938 	return 0;
3939 }
3940 
3941 static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
3942 				     struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
3943 {
3944 	struct qed_mcp_link_state *p_link;
3945 	int rc = 0;
3946 
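	/* Link state is maintained only by the leading hwfn */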
3947 	p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
3948 
3949 	if (!p_link->min_pf_rate) {
3950 		p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
3951 		p_hwfn->qm_info.wfq_data[vp_id].configured = true;
3952 		return rc;
3953 	}
3954 
3955 	rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
3956 
3957 	if (!rc)
3958 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
3959 						 p_link->min_pf_rate);
3960 	else
3961 		DP_NOTICE(p_hwfn,
3962 			  "Validation failed while configuring min rate\n");
3963 
3964 	return rc;
3965 }
3966 
3967 static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
3968 						 struct qed_ptt *p_ptt,
3969 						 u32 min_pf_rate)
3970 {
3971 	bool use_wfq = false;
3972 	int rc = 0;
3973 	u16 i;
3974 
	/* Validate all pre-configured vports for WFQ */
3976 	for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
3977 		u32 rate;
3978 
3979 		if (!p_hwfn->qm_info.wfq_data[i].configured)
3980 			continue;
3981 
3982 		rate = p_hwfn->qm_info.wfq_data[i].min_speed;
3983 		use_wfq = true;
3984 
3985 		rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
3986 		if (rc) {
3987 			DP_NOTICE(p_hwfn,
3988 				  "WFQ validation failed while configuring min rate\n");
3989 			break;
3990 		}
3991 	}
3992 
3993 	if (!rc && use_wfq)
3994 		qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
3995 	else
3996 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
3997 
3998 	return rc;
3999 }
4000 
4001 /* Main API for qed clients to configure vport min rate.
 * vp_id - vport id within the PF, range [0, total_num_vports_per_pf - 1].
 * rate - speed in Mbps to be assigned to the given vport.
4004  */
4005 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
4006 {
4007 	int i, rc = -EINVAL;
4008 
	/* Currently not supported; might change in the future */
4010 	if (cdev->num_hwfns > 1) {
4011 		DP_NOTICE(cdev,
4012 			  "WFQ configuration is not supported for this device\n");
4013 		return rc;
4014 	}
4015 
4016 	for_each_hwfn(cdev, i) {
4017 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4018 		struct qed_ptt *p_ptt;
4019 
4020 		p_ptt = qed_ptt_acquire(p_hwfn);
4021 		if (!p_ptt)
4022 			return -EBUSY;
4023 
4024 		rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
4025 
4026 		if (rc) {
4027 			qed_ptt_release(p_hwfn, p_ptt);
4028 			return rc;
4029 		}
4030 
4031 		qed_ptt_release(p_hwfn, p_ptt);
4032 	}
4033 
4034 	return rc;
4035 }
4036 
/* API to reconfigure WFQ when the MCP reports a link change */
4038 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
4039 					 struct qed_ptt *p_ptt, u32 min_pf_rate)
4040 {
4041 	int i;
4042 
4043 	if (cdev->num_hwfns > 1) {
4044 		DP_VERBOSE(cdev,
4045 			   NETIF_MSG_LINK,
4046 			   "WFQ configuration is not supported for this device\n");
4047 		return;
4048 	}
4049 
4050 	for_each_hwfn(cdev, i) {
4051 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4052 
4053 		__qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
4054 						      min_pf_rate);
4055 	}
4056 }
4057 
4058 int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
4059 				     struct qed_ptt *p_ptt,
4060 				     struct qed_mcp_link_state *p_link,
4061 				     u8 max_bw)
4062 {
4063 	int rc = 0;
4064 
4065 	p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
4066 
4067 	if (!p_link->line_speed && (max_bw != 100))
4068 		return rc;
4069 
4070 	p_link->speed = (p_link->line_speed * max_bw) / 100;
4071 	p_hwfn->qm_info.pf_rl = p_link->speed;
4072 
4073 	/* Since the limiter also affects Tx-switched traffic, we don't want it
4074 	 * to limit such traffic in case there's no actual limit.
4075 	 * In that case, set limit to imaginary high boundary.
4076 	 */
4077 	if (max_bw == 100)
4078 		p_hwfn->qm_info.pf_rl = 100000;
4079 
4080 	rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
4081 			    p_hwfn->qm_info.pf_rl);
4082 
4083 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4084 		   "Configured MAX bandwidth to be %08x Mb/sec\n",
4085 		   p_link->speed);
4086 
4087 	return rc;
4088 }
4089 
/* Main API to configure PF max bandwidth where bw range is [1-100] */
4091 int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
4092 {
4093 	int i, rc = -EINVAL;
4094 
4095 	if (max_bw < 1 || max_bw > 100) {
4096 		DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
4097 		return rc;
4098 	}
4099 
4100 	for_each_hwfn(cdev, i) {
4101 		struct qed_hwfn	*p_hwfn = &cdev->hwfns[i];
4102 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
4103 		struct qed_mcp_link_state *p_link;
4104 		struct qed_ptt *p_ptt;
4105 
4106 		p_link = &p_lead->mcp_info->link_output;
4107 
4108 		p_ptt = qed_ptt_acquire(p_hwfn);
4109 		if (!p_ptt)
4110 			return -EBUSY;
4111 
4112 		rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
4113 						      p_link, max_bw);
4114 
4115 		qed_ptt_release(p_hwfn, p_ptt);
4116 
4117 		if (rc)
4118 			break;
4119 	}
4120 
4121 	return rc;
4122 }
4123 
4124 int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
4125 				     struct qed_ptt *p_ptt,
4126 				     struct qed_mcp_link_state *p_link,
4127 				     u8 min_bw)
4128 {
4129 	int rc = 0;
4130 
4131 	p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
4132 	p_hwfn->qm_info.pf_wfq = min_bw;
4133 
4134 	if (!p_link->line_speed)
4135 		return rc;
4136 
4137 	p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
4138 
4139 	rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
4140 
4141 	DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
4142 		   "Configured MIN bandwidth to be %d Mb/sec\n",
4143 		   p_link->min_pf_rate);
4144 
4145 	return rc;
4146 }
4147 
4148 /* Main API to configure PF min bandwidth where bw range is [1-100] */
4149 int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
4150 {
4151 	int i, rc = -EINVAL;
4152 
4153 	if (min_bw < 1 || min_bw > 100) {
4154 		DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
4155 		return rc;
4156 	}
4157 
4158 	for_each_hwfn(cdev, i) {
4159 		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
4160 		struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
4161 		struct qed_mcp_link_state *p_link;
4162 		struct qed_ptt *p_ptt;
4163 
4164 		p_link = &p_lead->mcp_info->link_output;
4165 
4166 		p_ptt = qed_ptt_acquire(p_hwfn);
4167 		if (!p_ptt)
4168 			return -EBUSY;
4169 
4170 		rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
4171 						      p_link, min_bw);
4172 		if (rc) {
4173 			qed_ptt_release(p_hwfn, p_ptt);
4174 			return rc;
4175 		}
4176 
4177 		if (p_link->min_pf_rate) {
4178 			u32 min_rate = p_link->min_pf_rate;
4179 
4180 			rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
4181 								   p_ptt,
4182 								   min_rate);
4183 		}
4184 
4185 		qed_ptt_release(p_hwfn, p_ptt);
4186 	}
4187 
4188 	return rc;
4189 }
4190 
4191 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
4192 {
4193 	struct qed_mcp_link_state *p_link;
4194 
4195 	p_link = &p_hwfn->mcp_info->link_output;
4196 
4197 	if (p_link->min_pf_rate)
4198 		qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
4199 					       p_link->min_pf_rate);
4200 
4201 	memset(p_hwfn->qm_info.wfq_data, 0,
4202 	       sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
4203 }
4204 
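/* BB-based adapters have two engines; AH-based adapters have one */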
4205 int qed_device_num_engines(struct qed_dev *cdev)
4206 {
4207 	return QED_IS_BB(cdev) ? 2 : 1;
4208 }
4209 
4210 static int qed_device_num_ports(struct qed_dev *cdev)
4211 {
	/* In CMT there is always only a single port */
4213 	if (cdev->num_hwfns > 1)
4214 		return 1;
4215 
4216 	return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
4217 }
4218 
4219 int qed_device_get_port_id(struct qed_dev *cdev)
4220 {
4221 	return (QED_LEADING_HWFN(cdev)->abs_pf_id) % qed_device_num_ports(cdev);
4222 }
4223 
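/* Pack a MAC address into the three little-endian 16-bit words the
 * firmware expects, e.g. 00:11:22:33:44:55 becomes fw_msb = 0x0011,
 * fw_mid = 0x2233 and fw_lsb = 0x4455.
 */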
4224 void qed_set_fw_mac_addr(__le16 *fw_msb,
4225 			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
4226 {
4227 	((u8 *)fw_msb)[0] = mac[1];
4228 	((u8 *)fw_msb)[1] = mac[0];
4229 	((u8 *)fw_mid)[0] = mac[3];
4230 	((u8 *)fw_mid)[1] = mac[2];
4231 	((u8 *)fw_lsb)[0] = mac[5];
4232 	((u8 *)fw_lsb)[1] = mac[4];
4233 }
4234