1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2 /* QLogic qed NIC Driver
3  * Copyright (c) 2015-2017  QLogic Corporation
4  * Copyright (c) 2019-2020 Marvell International Ltd.
5  */
6 
7 #include <linux/types.h>
8 #include <asm/byteorder.h>
9 #include <linux/delay.h>
10 #include <linux/errno.h>
11 #include <linux/kernel.h>
12 #include <linux/slab.h>
13 #include <linux/spinlock.h>
14 #include <linux/string.h>
15 #include <linux/etherdevice.h>
16 #include "qed.h"
17 #include "qed_cxt.h"
18 #include "qed_dcbx.h"
19 #include "qed_hsi.h"
20 #include "qed_hw.h"
21 #include "qed_mcp.h"
22 #include "qed_reg_addr.h"
23 #include "qed_sriov.h"
24 
25 #define GRCBASE_MCP     0xe00000
26 
27 #define QED_MCP_RESP_ITER_US	10
28 
29 #define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
30 #define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */
31 
32 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
33 	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
34 	       _val)
35 
36 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
37 	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
38 
39 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
40 	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
41 		     offsetof(struct public_drv_mb, _field), _val)
42 
43 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
44 	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
45 		     offsetof(struct public_drv_mb, _field))
46 
47 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
48 		  DRV_ID_PDA_COMP_VER_SHIFT)
49 
50 #define MCP_BYTES_PER_MBIT_SHIFT 17
51 
52 bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
53 {
54 	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
55 		return false;
56 	return true;
57 }
58 
59 void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
60 {
61 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
62 					PUBLIC_PORT);
63 	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
64 
65 	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
66 						   MFW_PORT(p_hwfn));
67 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
68 		   "port_addr = 0x%x, port_id 0x%02x\n",
69 		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
70 }
71 
72 void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
73 {
74 	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
75 	u32 tmp, i;
76 
77 	if (!p_hwfn->mcp_info->public_base)
78 		return;
79 
80 	for (i = 0; i < length; i++) {
81 		tmp = qed_rd(p_hwfn, p_ptt,
82 			     p_hwfn->mcp_info->mfw_mb_addr +
83 			     (i << 2) + sizeof(u32));
84 
85 		/* The MB data is actually BE; Need to force it to cpu */
86 		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
87 			be32_to_cpu((__force __be32)tmp);
88 	}
89 }
90 
91 struct qed_mcp_cmd_elem {
92 	struct list_head list;
93 	struct qed_mcp_mb_params *p_mb_params;
94 	u16 expected_seq_num;
95 	bool b_is_completed;
96 };
97 
98 /* Must be called while cmd_lock is acquired */
99 static struct qed_mcp_cmd_elem *
100 qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
101 		     struct qed_mcp_mb_params *p_mb_params,
102 		     u16 expected_seq_num)
103 {
104 	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
105 
106 	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
107 	if (!p_cmd_elem)
108 		goto out;
109 
110 	p_cmd_elem->p_mb_params = p_mb_params;
111 	p_cmd_elem->expected_seq_num = expected_seq_num;
112 	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
113 out:
114 	return p_cmd_elem;
115 }
116 
117 /* Must be called while cmd_lock is acquired */
118 static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
119 				 struct qed_mcp_cmd_elem *p_cmd_elem)
120 {
121 	list_del(&p_cmd_elem->list);
122 	kfree(p_cmd_elem);
123 }
124 
125 /* Must be called while cmd_lock is acquired */
126 static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
127 						     u16 seq_num)
128 {
129 	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
130 
131 	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
132 		if (p_cmd_elem->expected_seq_num == seq_num)
133 			return p_cmd_elem;
134 	}
135 
136 	return NULL;
137 }
138 
139 int qed_mcp_free(struct qed_hwfn *p_hwfn)
140 {
141 	if (p_hwfn->mcp_info) {
142 		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;
143 
144 		kfree(p_hwfn->mcp_info->mfw_mb_cur);
145 		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
146 
147 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
148 		list_for_each_entry_safe(p_cmd_elem,
149 					 p_tmp,
150 					 &p_hwfn->mcp_info->cmd_list, list) {
151 			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
152 		}
153 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
154 	}
155 
156 	kfree(p_hwfn->mcp_info);
157 	p_hwfn->mcp_info = NULL;
158 
159 	return 0;
160 }
161 
162 /* Maximum of 1 sec to wait for the SHMEM ready indication */
163 #define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
164 #define QED_MCP_SHMEM_RDY_ITER_MS	50
165 
166 static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
167 {
168 	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
169 	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
170 	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
171 	u32 drv_mb_offsize, mfw_mb_offsize;
172 	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
173 
174 	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
175 	if (!p_info->public_base) {
176 		DP_NOTICE(p_hwfn,
177 			  "The address of the MCP scratch-pad is not configured\n");
178 		return -EINVAL;
179 	}
180 
181 	p_info->public_base |= GRCBASE_MCP;
182 
183 	/* Get the MFW MB address and number of supported messages */
184 	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
185 				SECTION_OFFSIZE_ADDR(p_info->public_base,
186 						     PUBLIC_MFW_MB));
187 	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
188 	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
189 					    p_info->mfw_mb_addr +
190 					    offsetof(struct public_mfw_mb,
191 						     sup_msgs));
192 
193 	/* The driver can notify that there was an MCP reset, and might read the
194 	 * SHMEM values before the MFW has completed initializing them.
195 	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
196 	 * data ready indication.
197 	 */
198 	while (!p_info->mfw_mb_length && --cnt) {
199 		msleep(msec);
200 		p_info->mfw_mb_length =
201 			(u16)qed_rd(p_hwfn, p_ptt,
202 				    p_info->mfw_mb_addr +
203 				    offsetof(struct public_mfw_mb, sup_msgs));
204 	}
205 
206 	if (!cnt) {
207 		DP_NOTICE(p_hwfn,
208 			  "Failed to get the SHMEM ready notification after %d msec\n",
209 			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
210 		return -EBUSY;
211 	}
212 
213 	/* Calculate the driver and MFW mailbox address */
214 	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
215 				SECTION_OFFSIZE_ADDR(p_info->public_base,
216 						     PUBLIC_DRV_MB));
217 	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
218 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
219 		   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
220 		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
221 
222 	/* Get the current driver mailbox sequence before sending
223 	 * the first command
224 	 */
225 	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
226 			     DRV_MSG_SEQ_NUMBER_MASK;
227 
228 	/* Get current FW pulse sequence */
229 	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
230 				DRV_PULSE_SEQ_MASK;
231 
232 	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
233 
234 	return 0;
235 }
236 
237 int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
238 {
239 	struct qed_mcp_info *p_info;
240 	u32 size;
241 
242 	/* Allocate mcp_info structure */
243 	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
244 	if (!p_hwfn->mcp_info)
245 		goto err;
246 	p_info = p_hwfn->mcp_info;
247 
248 	/* Initialize the MFW spinlock */
249 	spin_lock_init(&p_info->cmd_lock);
250 	spin_lock_init(&p_info->link_lock);
251 
252 	INIT_LIST_HEAD(&p_info->cmd_list);
253 
254 	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
255 		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
256 		/* Do not free mcp_info here, since public_base indicate that
257 		 * the MCP is not initialized
258 		 */
259 		return 0;
260 	}
261 
262 	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
263 	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
264 	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
265 	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
266 		goto err;
267 
268 	return 0;
269 
270 err:
271 	qed_mcp_free(p_hwfn);
272 	return -ENOMEM;
273 }
274 
275 static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
276 				   struct qed_ptt *p_ptt)
277 {
278 	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
279 
280 	/* Use MCP history register to check if MCP reset occurred between init
281 	 * time and now.
282 	 */
283 	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
284 		DP_VERBOSE(p_hwfn,
285 			   QED_MSG_SP,
286 			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
287 			   p_hwfn->mcp_info->mcp_hist, generic_por_0);
288 
289 		qed_load_mcp_offsets(p_hwfn, p_ptt);
290 		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
291 	}
292 }
293 
294 int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
295 {
296 	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
297 	int rc = 0;
298 
299 	if (p_hwfn->mcp_info->b_block_cmd) {
300 		DP_NOTICE(p_hwfn,
301 			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
302 		return -EBUSY;
303 	}
304 
305 	/* Ensure that only a single thread is accessing the mailbox */
306 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
307 
308 	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
309 
310 	/* Set drv command along with the updated sequence */
311 	qed_mcp_reread_offsets(p_hwfn, p_ptt);
312 	seq = ++p_hwfn->mcp_info->drv_mb_seq;
313 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
314 
315 	do {
316 		/* Wait for MFW response */
317 		udelay(delay);
318 		/* Give the FW up to 500 second (50*1000*10usec) */
319 	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
320 					      MISCS_REG_GENERIC_POR_0)) &&
321 		 (cnt++ < QED_MCP_RESET_RETRIES));
322 
323 	if (org_mcp_reset_seq !=
324 	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
325 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
326 			   "MCP was reset after %d usec\n", cnt * delay);
327 	} else {
328 		DP_ERR(p_hwfn, "Failed to reset MCP\n");
329 		rc = -EAGAIN;
330 	}
331 
332 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
333 
334 	return rc;
335 }
336 
337 /* Must be called while cmd_lock is acquired */
338 static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
339 {
340 	struct qed_mcp_cmd_elem *p_cmd_elem;
341 
342 	/* There is at most one pending command at a certain time, and if it
343 	 * exists - it is placed at the HEAD of the list.
344 	 */
345 	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
346 		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
347 					      struct qed_mcp_cmd_elem, list);
348 		return !p_cmd_elem->b_is_completed;
349 	}
350 
351 	return false;
352 }
353 
354 /* Must be called while cmd_lock is acquired */
355 static int
356 qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
357 {
358 	struct qed_mcp_mb_params *p_mb_params;
359 	struct qed_mcp_cmd_elem *p_cmd_elem;
360 	u32 mcp_resp;
361 	u16 seq_num;
362 
363 	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
364 	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
365 
366 	/* Return if no new non-handled response has been received */
367 	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
368 		return -EAGAIN;
369 
370 	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
371 	if (!p_cmd_elem) {
372 		DP_ERR(p_hwfn,
373 		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
374 		       seq_num);
375 		return -EINVAL;
376 	}
377 
378 	p_mb_params = p_cmd_elem->p_mb_params;
379 
380 	/* Get the MFW response along with the sequence number */
381 	p_mb_params->mcp_resp = mcp_resp;
382 
383 	/* Get the MFW param */
384 	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
385 
386 	/* Get the union data */
387 	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
388 		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
389 				      offsetof(struct public_drv_mb,
390 					       union_data);
391 		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
392 				union_data_addr, p_mb_params->data_dst_size);
393 	}
394 
395 	p_cmd_elem->b_is_completed = true;
396 
397 	return 0;
398 }
399 
400 /* Must be called while cmd_lock is acquired */
401 static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
402 				    struct qed_ptt *p_ptt,
403 				    struct qed_mcp_mb_params *p_mb_params,
404 				    u16 seq_num)
405 {
406 	union drv_union_data union_data;
407 	u32 union_data_addr;
408 
409 	/* Set the union data */
410 	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
411 			  offsetof(struct public_drv_mb, union_data);
412 	memset(&union_data, 0, sizeof(union_data));
413 	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
414 		memcpy(&union_data, p_mb_params->p_data_src,
415 		       p_mb_params->data_src_size);
416 	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
417 		      sizeof(union_data));
418 
419 	/* Set the drv param */
420 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
421 
422 	/* Set the drv command along with the sequence number */
423 	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
424 
425 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
426 		   "MFW mailbox: command 0x%08x param 0x%08x\n",
427 		   (p_mb_params->cmd | seq_num), p_mb_params->param);
428 }
429 
430 static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
431 {
432 	p_hwfn->mcp_info->b_block_cmd = block_cmd;
433 
434 	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
435 		block_cmd ? "Block" : "Unblock");
436 }
437 
438 static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
439 				   struct qed_ptt *p_ptt)
440 {
441 	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
442 	u32 delay = QED_MCP_RESP_ITER_US;
443 
444 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
445 	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
446 	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
447 	udelay(delay);
448 	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
449 	udelay(delay);
450 	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
451 
452 	DP_NOTICE(p_hwfn,
453 		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
454 		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
455 }
456 
457 static int
458 _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
459 		       struct qed_ptt *p_ptt,
460 		       struct qed_mcp_mb_params *p_mb_params,
461 		       u32 max_retries, u32 usecs)
462 {
463 	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
464 	struct qed_mcp_cmd_elem *p_cmd_elem;
465 	u16 seq_num;
466 	int rc = 0;
467 
468 	/* Wait until the mailbox is non-occupied */
469 	do {
470 		/* Exit the loop if there is no pending command, or if the
471 		 * pending command is completed during this iteration.
472 		 * The spinlock stays locked until the command is sent.
473 		 */
474 
475 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
476 
477 		if (!qed_mcp_has_pending_cmd(p_hwfn)) {
478 			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
479 			break;
480 		}
481 
482 		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
483 		if (!rc) {
484 			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
485 			break;
486 		} else if (rc != -EAGAIN) {
487 			goto err;
488 		}
489 
490 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
491 
492 		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
493 			msleep(msecs);
494 		else
495 			udelay(usecs);
496 	} while (++cnt < max_retries);
497 
498 	if (cnt >= max_retries) {
499 		DP_NOTICE(p_hwfn,
500 			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
501 			  p_mb_params->cmd, p_mb_params->param);
502 		return -EAGAIN;
503 	}
504 
505 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
506 
507 	/* Send the mailbox command */
508 	qed_mcp_reread_offsets(p_hwfn, p_ptt);
509 	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
510 	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
511 	if (!p_cmd_elem) {
512 		rc = -ENOMEM;
513 		goto err;
514 	}
515 
516 	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
517 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
518 
519 	/* Wait for the MFW response */
520 	do {
521 		/* Exit the loop if the command is already completed, or if the
522 		 * command is completed during this iteration.
523 		 * The spinlock stays locked until the list element is removed.
524 		 */
525 
526 		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
527 			msleep(msecs);
528 		else
529 			udelay(usecs);
530 
531 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
532 
533 		if (p_cmd_elem->b_is_completed) {
534 			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
535 			break;
536 		}
537 
538 		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
539 		if (!rc) {
540 			spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
541 			break;
542 		} else if (rc != -EAGAIN) {
543 			goto err;
544 		}
545 
546 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
547 	} while (++cnt < max_retries);
548 
549 	if (cnt >= max_retries) {
550 		DP_NOTICE(p_hwfn,
551 			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
552 			  p_mb_params->cmd, p_mb_params->param);
553 		qed_mcp_print_cpu_info(p_hwfn, p_ptt);
554 
555 		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
556 		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
557 		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
558 
559 		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
560 			qed_mcp_cmd_set_blocking(p_hwfn, true);
561 
562 		qed_hw_err_notify(p_hwfn, p_ptt,
563 				  QED_HW_ERR_MFW_RESP_FAIL, NULL);
564 		return -EAGAIN;
565 	}
566 
567 	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
568 	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
569 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
570 
571 	DP_VERBOSE(p_hwfn,
572 		   QED_MSG_SP,
573 		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
574 		   p_mb_params->mcp_resp,
575 		   p_mb_params->mcp_param,
576 		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);
577 
578 	/* Clear the sequence number from the MFW response */
579 	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
580 
581 	return 0;
582 
583 err:
584 	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
585 	return rc;
586 }
587 
588 static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
589 				 struct qed_ptt *p_ptt,
590 				 struct qed_mcp_mb_params *p_mb_params)
591 {
592 	size_t union_data_size = sizeof(union drv_union_data);
593 	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
594 	u32 usecs = QED_MCP_RESP_ITER_US;
595 
596 	/* MCP not initialized */
597 	if (!qed_mcp_is_init(p_hwfn)) {
598 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
599 		return -EBUSY;
600 	}
601 
602 	if (p_hwfn->mcp_info->b_block_cmd) {
603 		DP_NOTICE(p_hwfn,
604 			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
605 			  p_mb_params->cmd, p_mb_params->param);
606 		return -EBUSY;
607 	}
608 
609 	if (p_mb_params->data_src_size > union_data_size ||
610 	    p_mb_params->data_dst_size > union_data_size) {
611 		DP_ERR(p_hwfn,
612 		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
613 		       p_mb_params->data_src_size,
614 		       p_mb_params->data_dst_size, union_data_size);
615 		return -EINVAL;
616 	}
617 
618 	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
619 		max_retries = DIV_ROUND_UP(max_retries, 1000);
620 		usecs *= 1000;
621 	}
622 
623 	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
624 				      usecs);
625 }
626 
627 int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
628 		struct qed_ptt *p_ptt,
629 		u32 cmd,
630 		u32 param,
631 		u32 *o_mcp_resp,
632 		u32 *o_mcp_param)
633 {
634 	struct qed_mcp_mb_params mb_params;
635 	int rc;
636 
637 	memset(&mb_params, 0, sizeof(mb_params));
638 	mb_params.cmd = cmd;
639 	mb_params.param = param;
640 
641 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
642 	if (rc)
643 		return rc;
644 
645 	*o_mcp_resp = mb_params.mcp_resp;
646 	*o_mcp_param = mb_params.mcp_param;
647 
648 	return 0;
649 }
650 
651 static int
652 qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
653 		   struct qed_ptt *p_ptt,
654 		   u32 cmd,
655 		   u32 param,
656 		   u32 *o_mcp_resp,
657 		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
658 {
659 	struct qed_mcp_mb_params mb_params;
660 	int rc;
661 
662 	memset(&mb_params, 0, sizeof(mb_params));
663 	mb_params.cmd = cmd;
664 	mb_params.param = param;
665 	mb_params.p_data_src = i_buf;
666 	mb_params.data_src_size = (u8)i_txn_size;
667 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
668 	if (rc)
669 		return rc;
670 
671 	*o_mcp_resp = mb_params.mcp_resp;
672 	*o_mcp_param = mb_params.mcp_param;
673 
674 	/* nvm_info needs to be updated */
675 	p_hwfn->nvm_info.valid = false;
676 
677 	return 0;
678 }
679 
680 int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
681 		       struct qed_ptt *p_ptt,
682 		       u32 cmd,
683 		       u32 param,
684 		       u32 *o_mcp_resp,
685 		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
686 {
687 	struct qed_mcp_mb_params mb_params;
688 	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
689 	int rc;
690 
691 	memset(&mb_params, 0, sizeof(mb_params));
692 	mb_params.cmd = cmd;
693 	mb_params.param = param;
694 	mb_params.p_data_dst = raw_data;
695 
696 	/* Use the maximal value since the actual one is part of the response */
697 	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
698 
699 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
700 	if (rc)
701 		return rc;
702 
703 	*o_mcp_resp = mb_params.mcp_resp;
704 	*o_mcp_param = mb_params.mcp_param;
705 
706 	*o_txn_size = *o_mcp_param;
707 	memcpy(o_buf, raw_data, *o_txn_size);
708 
709 	return 0;
710 }
711 
712 static bool
713 qed_mcp_can_force_load(u8 drv_role,
714 		       u8 exist_drv_role,
715 		       enum qed_override_force_load override_force_load)
716 {
717 	bool can_force_load = false;
718 
719 	switch (override_force_load) {
720 	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
721 		can_force_load = true;
722 		break;
723 	case QED_OVERRIDE_FORCE_LOAD_NEVER:
724 		can_force_load = false;
725 		break;
726 	default:
727 		can_force_load = (drv_role == DRV_ROLE_OS &&
728 				  exist_drv_role == DRV_ROLE_PREBOOT) ||
729 				 (drv_role == DRV_ROLE_KDUMP &&
730 				  exist_drv_role == DRV_ROLE_OS);
731 		break;
732 	}
733 
734 	return can_force_load;
735 }
736 
737 static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
738 				   struct qed_ptt *p_ptt)
739 {
740 	u32 resp = 0, param = 0;
741 	int rc;
742 
743 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
744 			 &resp, &param);
745 	if (rc)
746 		DP_NOTICE(p_hwfn,
747 			  "Failed to send cancel load request, rc = %d\n", rc);
748 
749 	return rc;
750 }
751 
752 #define CONFIG_QEDE_BITMAP_IDX		BIT(0)
753 #define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
754 #define CONFIG_QEDR_BITMAP_IDX		BIT(2)
755 #define CONFIG_QEDF_BITMAP_IDX		BIT(4)
756 #define CONFIG_QEDI_BITMAP_IDX		BIT(5)
757 #define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
758 
759 static u32 qed_get_config_bitmap(void)
760 {
761 	u32 config_bitmap = 0x0;
762 
763 	if (IS_ENABLED(CONFIG_QEDE))
764 		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;
765 
766 	if (IS_ENABLED(CONFIG_QED_SRIOV))
767 		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;
768 
769 	if (IS_ENABLED(CONFIG_QED_RDMA))
770 		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;
771 
772 	if (IS_ENABLED(CONFIG_QED_FCOE))
773 		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;
774 
775 	if (IS_ENABLED(CONFIG_QED_ISCSI))
776 		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;
777 
778 	if (IS_ENABLED(CONFIG_QED_LL2))
779 		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;
780 
781 	return config_bitmap;
782 }
783 
784 struct qed_load_req_in_params {
785 	u8 hsi_ver;
786 #define QED_LOAD_REQ_HSI_VER_DEFAULT	0
787 #define QED_LOAD_REQ_HSI_VER_1		1
788 	u32 drv_ver_0;
789 	u32 drv_ver_1;
790 	u32 fw_ver;
791 	u8 drv_role;
792 	u8 timeout_val;
793 	u8 force_cmd;
794 	bool avoid_eng_reset;
795 };
796 
797 struct qed_load_req_out_params {
798 	u32 load_code;
799 	u32 exist_drv_ver_0;
800 	u32 exist_drv_ver_1;
801 	u32 exist_fw_ver;
802 	u8 exist_drv_role;
803 	u8 mfw_hsi_ver;
804 	bool drv_exists;
805 };
806 
807 static int
808 __qed_mcp_load_req(struct qed_hwfn *p_hwfn,
809 		   struct qed_ptt *p_ptt,
810 		   struct qed_load_req_in_params *p_in_params,
811 		   struct qed_load_req_out_params *p_out_params)
812 {
813 	struct qed_mcp_mb_params mb_params;
814 	struct load_req_stc load_req;
815 	struct load_rsp_stc load_rsp;
816 	u32 hsi_ver;
817 	int rc;
818 
819 	memset(&load_req, 0, sizeof(load_req));
820 	load_req.drv_ver_0 = p_in_params->drv_ver_0;
821 	load_req.drv_ver_1 = p_in_params->drv_ver_1;
822 	load_req.fw_ver = p_in_params->fw_ver;
823 	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
824 	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
825 			  p_in_params->timeout_val);
826 	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
827 			  p_in_params->force_cmd);
828 	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
829 			  p_in_params->avoid_eng_reset);
830 
831 	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
832 		  DRV_ID_MCP_HSI_VER_CURRENT :
833 		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
834 
835 	memset(&mb_params, 0, sizeof(mb_params));
836 	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
837 	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
838 	mb_params.p_data_src = &load_req;
839 	mb_params.data_src_size = sizeof(load_req);
840 	mb_params.p_data_dst = &load_rsp;
841 	mb_params.data_dst_size = sizeof(load_rsp);
842 	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
843 
844 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
845 		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
846 		   mb_params.param,
847 		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
848 		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
849 		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
850 		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
851 
852 	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
853 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
854 			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
855 			   load_req.drv_ver_0,
856 			   load_req.drv_ver_1,
857 			   load_req.fw_ver,
858 			   load_req.misc0,
859 			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
860 			   QED_MFW_GET_FIELD(load_req.misc0,
861 					     LOAD_REQ_LOCK_TO),
862 			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
863 			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
864 	}
865 
866 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
867 	if (rc) {
868 		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
869 		return rc;
870 	}
871 
872 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
873 		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
874 	p_out_params->load_code = mb_params.mcp_resp;
875 
876 	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
877 	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
878 		DP_VERBOSE(p_hwfn,
879 			   QED_MSG_SP,
880 			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
881 			   load_rsp.drv_ver_0,
882 			   load_rsp.drv_ver_1,
883 			   load_rsp.fw_ver,
884 			   load_rsp.misc0,
885 			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
886 			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
887 			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
888 
889 		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
890 		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
891 		p_out_params->exist_fw_ver = load_rsp.fw_ver;
892 		p_out_params->exist_drv_role =
893 		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
894 		p_out_params->mfw_hsi_ver =
895 		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
896 		p_out_params->drv_exists =
897 		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
898 		    LOAD_RSP_FLAGS0_DRV_EXISTS;
899 	}
900 
901 	return 0;
902 }
903 
904 static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
905 				  enum qed_drv_role drv_role,
906 				  u8 *p_mfw_drv_role)
907 {
908 	switch (drv_role) {
909 	case QED_DRV_ROLE_OS:
910 		*p_mfw_drv_role = DRV_ROLE_OS;
911 		break;
912 	case QED_DRV_ROLE_KDUMP:
913 		*p_mfw_drv_role = DRV_ROLE_KDUMP;
914 		break;
915 	default:
916 		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
917 		return -EINVAL;
918 	}
919 
920 	return 0;
921 }
922 
923 enum qed_load_req_force {
924 	QED_LOAD_REQ_FORCE_NONE,
925 	QED_LOAD_REQ_FORCE_PF,
926 	QED_LOAD_REQ_FORCE_ALL,
927 };
928 
929 static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
930 
931 				  enum qed_load_req_force force_cmd,
932 				  u8 *p_mfw_force_cmd)
933 {
934 	switch (force_cmd) {
935 	case QED_LOAD_REQ_FORCE_NONE:
936 		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
937 		break;
938 	case QED_LOAD_REQ_FORCE_PF:
939 		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
940 		break;
941 	case QED_LOAD_REQ_FORCE_ALL:
942 		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
943 		break;
944 	}
945 }
946 
947 int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
948 		     struct qed_ptt *p_ptt,
949 		     struct qed_load_req_params *p_params)
950 {
951 	struct qed_load_req_out_params out_params;
952 	struct qed_load_req_in_params in_params;
953 	u8 mfw_drv_role, mfw_force_cmd;
954 	int rc;
955 
956 	memset(&in_params, 0, sizeof(in_params));
957 	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
958 	in_params.drv_ver_0 = QED_VERSION;
959 	in_params.drv_ver_1 = qed_get_config_bitmap();
960 	in_params.fw_ver = STORM_FW_VERSION;
961 	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
962 	if (rc)
963 		return rc;
964 
965 	in_params.drv_role = mfw_drv_role;
966 	in_params.timeout_val = p_params->timeout_val;
967 	qed_get_mfw_force_cmd(p_hwfn,
968 			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
969 
970 	in_params.force_cmd = mfw_force_cmd;
971 	in_params.avoid_eng_reset = p_params->avoid_eng_reset;
972 
973 	memset(&out_params, 0, sizeof(out_params));
974 	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
975 	if (rc)
976 		return rc;
977 
978 	/* First handle cases where another load request should/might be sent:
979 	 * - MFW expects the old interface [HSI version = 1]
980 	 * - MFW responds that a force load request is required
981 	 */
982 	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
983 		DP_INFO(p_hwfn,
984 			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
985 
986 		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
987 		memset(&out_params, 0, sizeof(out_params));
988 		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
989 		if (rc)
990 			return rc;
991 	} else if (out_params.load_code ==
992 		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
993 		if (qed_mcp_can_force_load(in_params.drv_role,
994 					   out_params.exist_drv_role,
995 					   p_params->override_force_load)) {
996 			DP_INFO(p_hwfn,
997 				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
998 				in_params.drv_role, in_params.fw_ver,
999 				in_params.drv_ver_0, in_params.drv_ver_1,
1000 				out_params.exist_drv_role,
1001 				out_params.exist_fw_ver,
1002 				out_params.exist_drv_ver_0,
1003 				out_params.exist_drv_ver_1);
1004 
1005 			qed_get_mfw_force_cmd(p_hwfn,
1006 					      QED_LOAD_REQ_FORCE_ALL,
1007 					      &mfw_force_cmd);
1008 
1009 			in_params.force_cmd = mfw_force_cmd;
1010 			memset(&out_params, 0, sizeof(out_params));
1011 			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
1012 						&out_params);
1013 			if (rc)
1014 				return rc;
1015 		} else {
1016 			DP_NOTICE(p_hwfn,
1017 				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1018 				  in_params.drv_role, in_params.fw_ver,
1019 				  in_params.drv_ver_0, in_params.drv_ver_1,
1020 				  out_params.exist_drv_role,
1021 				  out_params.exist_fw_ver,
1022 				  out_params.exist_drv_ver_0,
1023 				  out_params.exist_drv_ver_1);
1024 			DP_NOTICE(p_hwfn,
1025 				  "Avoid sending a force load request to prevent disruption of active PFs\n");
1026 
1027 			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
1028 			return -EBUSY;
1029 		}
1030 	}
1031 
1032 	/* Now handle the other types of responses.
1033 	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1034 	 * expected here after the additional revised load requests were sent.
1035 	 */
1036 	switch (out_params.load_code) {
1037 	case FW_MSG_CODE_DRV_LOAD_ENGINE:
1038 	case FW_MSG_CODE_DRV_LOAD_PORT:
1039 	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1040 		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
1041 		    out_params.drv_exists) {
1042 			/* The role and fw/driver version match, but the PF is
1043 			 * already loaded and has not been unloaded gracefully.
1044 			 */
1045 			DP_NOTICE(p_hwfn,
1046 				  "PF is already loaded\n");
1047 			return -EINVAL;
1048 		}
1049 		break;
1050 	default:
1051 		DP_NOTICE(p_hwfn,
1052 			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1053 			  out_params.load_code);
1054 		return -EBUSY;
1055 	}
1056 
1057 	p_params->load_code = out_params.load_code;
1058 
1059 	return 0;
1060 }
1061 
1062 int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1063 {
1064 	u32 resp = 0, param = 0;
1065 	int rc;
1066 
1067 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
1068 			 &param);
1069 	if (rc) {
1070 		DP_NOTICE(p_hwfn,
1071 			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
1072 		return rc;
1073 	}
1074 
1075 	/* Check if there is a DID mismatch between nvm-cfg/efuse */
1076 	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
1077 		DP_NOTICE(p_hwfn,
1078 			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
1079 
1080 	return 0;
1081 }
1082 
1083 int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1084 {
1085 	struct qed_mcp_mb_params mb_params;
1086 	u32 wol_param;
1087 
1088 	switch (p_hwfn->cdev->wol_config) {
1089 	case QED_OV_WOL_DISABLED:
1090 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
1091 		break;
1092 	case QED_OV_WOL_ENABLED:
1093 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
1094 		break;
1095 	default:
1096 		DP_NOTICE(p_hwfn,
1097 			  "Unknown WoL configuration %02x\n",
1098 			  p_hwfn->cdev->wol_config);
1099 		fallthrough;
1100 	case QED_OV_WOL_DEFAULT:
1101 		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
1102 	}
1103 
1104 	memset(&mb_params, 0, sizeof(mb_params));
1105 	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
1106 	mb_params.param = wol_param;
1107 	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
1108 
1109 	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1110 }
1111 
1112 int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1113 {
1114 	struct qed_mcp_mb_params mb_params;
1115 	struct mcp_mac wol_mac;
1116 
1117 	memset(&mb_params, 0, sizeof(mb_params));
1118 	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
1119 
1120 	/* Set the primary MAC if WoL is enabled */
1121 	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
1122 		u8 *p_mac = p_hwfn->cdev->wol_mac;
1123 
1124 		memset(&wol_mac, 0, sizeof(wol_mac));
1125 		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
1126 		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
1127 				    p_mac[4] << 8 | p_mac[5];
1128 
1129 		DP_VERBOSE(p_hwfn,
1130 			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
1131 			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
1132 			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
1133 
1134 		mb_params.p_data_src = &wol_mac;
1135 		mb_params.data_src_size = sizeof(wol_mac);
1136 	}
1137 
1138 	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1139 }
1140 
1141 static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
1142 				  struct qed_ptt *p_ptt)
1143 {
1144 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1145 					PUBLIC_PATH);
1146 	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1147 	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
1148 				     QED_PATH_ID(p_hwfn));
1149 	u32 disabled_vfs[VF_MAX_STATIC / 32];
1150 	int i;
1151 
1152 	DP_VERBOSE(p_hwfn,
1153 		   QED_MSG_SP,
1154 		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
1155 		   mfw_path_offsize, path_addr);
1156 
1157 	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
1158 		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
1159 					 path_addr +
1160 					 offsetof(struct public_path,
1161 						  mcp_vf_disabled) +
1162 					 sizeof(u32) * i);
1163 		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1164 			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
1165 			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
1166 	}
1167 
1168 	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
1169 		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
1170 }
1171 
1172 int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
1173 		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
1174 {
1175 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1176 					PUBLIC_FUNC);
1177 	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
1178 	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
1179 				     MCP_PF_ID(p_hwfn));
1180 	struct qed_mcp_mb_params mb_params;
1181 	int rc;
1182 	int i;
1183 
1184 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1185 		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
1186 			   "Acking VFs [%08x,...,%08x] - %08x\n",
1187 			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
1188 
1189 	memset(&mb_params, 0, sizeof(mb_params));
1190 	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
1191 	mb_params.p_data_src = vfs_to_ack;
1192 	mb_params.data_src_size = VF_MAX_STATIC / 8;
1193 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1194 	if (rc) {
1195 		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
1196 		return -EBUSY;
1197 	}
1198 
1199 	/* Clear the ACK bits */
1200 	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
1201 		qed_wr(p_hwfn, p_ptt,
1202 		       func_addr +
1203 		       offsetof(struct public_func, drv_ack_vf_disabled) +
1204 		       i * sizeof(u32), 0);
1205 
1206 	return rc;
1207 }
1208 
1209 static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
1210 					      struct qed_ptt *p_ptt)
1211 {
1212 	u32 transceiver_state;
1213 
1214 	transceiver_state = qed_rd(p_hwfn, p_ptt,
1215 				   p_hwfn->mcp_info->port_addr +
1216 				   offsetof(struct public_port,
1217 					    transceiver_data));
1218 
1219 	DP_VERBOSE(p_hwfn,
1220 		   (NETIF_MSG_HW | QED_MSG_SP),
1221 		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
1222 		   transceiver_state,
1223 		   (u32)(p_hwfn->mcp_info->port_addr +
1224 			  offsetof(struct public_port, transceiver_data)));
1225 
1226 	transceiver_state = GET_FIELD(transceiver_state,
1227 				      ETH_TRANSCEIVER_STATE);
1228 
1229 	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
1230 		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
1231 	else
1232 		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
1233 }
1234 
1235 static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
1236 				    struct qed_ptt *p_ptt,
1237 				    struct qed_mcp_link_state *p_link)
1238 {
1239 	u32 eee_status, val;
1240 
1241 	p_link->eee_adv_caps = 0;
1242 	p_link->eee_lp_adv_caps = 0;
1243 	eee_status = qed_rd(p_hwfn,
1244 			    p_ptt,
1245 			    p_hwfn->mcp_info->port_addr +
1246 			    offsetof(struct public_port, eee_status));
1247 	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
1248 	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
1249 	if (val & EEE_1G_ADV)
1250 		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
1251 	if (val & EEE_10G_ADV)
1252 		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
1253 	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
1254 	if (val & EEE_1G_ADV)
1255 		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
1256 	if (val & EEE_10G_ADV)
1257 		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
1258 }
1259 
1260 static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
1261 				  struct qed_ptt *p_ptt,
1262 				  struct public_func *p_data, int pfid)
1263 {
1264 	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1265 					PUBLIC_FUNC);
1266 	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
1267 	u32 func_addr;
1268 	u32 i, size;
1269 
1270 	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
1271 	memset(p_data, 0, sizeof(*p_data));
1272 
1273 	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
1274 	for (i = 0; i < size / sizeof(u32); i++)
1275 		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
1276 					    func_addr + (i << 2));
1277 	return size;
1278 }
1279 
1280 static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
1281 				  struct public_func *p_shmem_info)
1282 {
1283 	struct qed_mcp_function_info *p_info;
1284 
1285 	p_info = &p_hwfn->mcp_info->func_info;
1286 
1287 	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
1288 						  FUNC_MF_CFG_MIN_BW);
1289 	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
1290 		DP_INFO(p_hwfn,
1291 			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
1292 			p_info->bandwidth_min);
1293 		p_info->bandwidth_min = 1;
1294 	}
1295 
1296 	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
1297 						  FUNC_MF_CFG_MAX_BW);
1298 	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
1299 		DP_INFO(p_hwfn,
1300 			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
1301 			p_info->bandwidth_max);
1302 		p_info->bandwidth_max = 100;
1303 	}
1304 }
1305 
1306 static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
1307 				       struct qed_ptt *p_ptt, bool b_reset)
1308 {
1309 	struct qed_mcp_link_state *p_link;
1310 	u8 max_bw, min_bw;
1311 	u32 status = 0;
1312 
1313 	/* Prevent SW/attentions from doing this at the same time */
1314 	spin_lock_bh(&p_hwfn->mcp_info->link_lock);
1315 
1316 	p_link = &p_hwfn->mcp_info->link_output;
1317 	memset(p_link, 0, sizeof(*p_link));
1318 	if (!b_reset) {
1319 		status = qed_rd(p_hwfn, p_ptt,
1320 				p_hwfn->mcp_info->port_addr +
1321 				offsetof(struct public_port, link_status));
1322 		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
1323 			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1324 			   status,
1325 			   (u32)(p_hwfn->mcp_info->port_addr +
1326 				 offsetof(struct public_port, link_status)));
1327 	} else {
1328 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1329 			   "Resetting link indications\n");
1330 		goto out;
1331 	}
1332 
1333 	if (p_hwfn->b_drv_link_init) {
1334 		/* Link indication with modern MFW arrives as per-PF
1335 		 * indication.
1336 		 */
1337 		if (p_hwfn->mcp_info->capabilities &
1338 		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
1339 			struct public_func shmem_info;
1340 
1341 			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1342 					       MCP_PF_ID(p_hwfn));
1343 			p_link->link_up = !!(shmem_info.status &
1344 					     FUNC_STATUS_VIRTUAL_LINK_UP);
1345 			qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1346 			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1347 				   "Virtual link_up = %d\n", p_link->link_up);
1348 		} else {
1349 			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
1350 			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1351 				   "Physical link_up = %d\n", p_link->link_up);
1352 		}
1353 	} else {
1354 		p_link->link_up = false;
1355 	}
1356 
1357 	p_link->full_duplex = true;
1358 	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
1359 	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
1360 		p_link->speed = 100000;
1361 		break;
1362 	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
1363 		p_link->speed = 50000;
1364 		break;
1365 	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
1366 		p_link->speed = 40000;
1367 		break;
1368 	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
1369 		p_link->speed = 25000;
1370 		break;
1371 	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
1372 		p_link->speed = 20000;
1373 		break;
1374 	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
1375 		p_link->speed = 10000;
1376 		break;
1377 	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
1378 		p_link->full_duplex = false;
1379 		fallthrough;
1380 	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
1381 		p_link->speed = 1000;
1382 		break;
1383 	default:
1384 		p_link->speed = 0;
1385 		p_link->link_up = 0;
1386 	}
1387 
1388 	if (p_link->link_up && p_link->speed)
1389 		p_link->line_speed = p_link->speed;
1390 	else
1391 		p_link->line_speed = 0;
1392 
1393 	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
1394 	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
1395 
1396 	/* Max bandwidth configuration */
1397 	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
1398 
1399 	/* Min bandwidth configuration */
1400 	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
1401 	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
1402 					    p_link->min_pf_rate);
1403 
1404 	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
1405 	p_link->an_complete = !!(status &
1406 				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
1407 	p_link->parallel_detection = !!(status &
1408 					LINK_STATUS_PARALLEL_DETECTION_USED);
1409 	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
1410 
1411 	p_link->partner_adv_speed |=
1412 		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
1413 		QED_LINK_PARTNER_SPEED_1G_FD : 0;
1414 	p_link->partner_adv_speed |=
1415 		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
1416 		QED_LINK_PARTNER_SPEED_1G_HD : 0;
1417 	p_link->partner_adv_speed |=
1418 		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
1419 		QED_LINK_PARTNER_SPEED_10G : 0;
1420 	p_link->partner_adv_speed |=
1421 		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
1422 		QED_LINK_PARTNER_SPEED_20G : 0;
1423 	p_link->partner_adv_speed |=
1424 		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
1425 		QED_LINK_PARTNER_SPEED_25G : 0;
1426 	p_link->partner_adv_speed |=
1427 		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
1428 		QED_LINK_PARTNER_SPEED_40G : 0;
1429 	p_link->partner_adv_speed |=
1430 		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
1431 		QED_LINK_PARTNER_SPEED_50G : 0;
1432 	p_link->partner_adv_speed |=
1433 		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
1434 		QED_LINK_PARTNER_SPEED_100G : 0;
1435 
1436 	p_link->partner_tx_flow_ctrl_en =
1437 		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
1438 	p_link->partner_rx_flow_ctrl_en =
1439 		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
1440 
1441 	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
1442 	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
1443 		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
1444 		break;
1445 	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
1446 		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
1447 		break;
1448 	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
1449 		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
1450 		break;
1451 	default:
1452 		p_link->partner_adv_pause = 0;
1453 	}
1454 
1455 	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
1456 
1457 	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
1458 		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
1459 
1460 	if (p_hwfn->mcp_info->capabilities &
1461 	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
1462 		switch (status & LINK_STATUS_FEC_MODE_MASK) {
1463 		case LINK_STATUS_FEC_MODE_NONE:
1464 			p_link->fec_active = QED_FEC_MODE_NONE;
1465 			break;
1466 		case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
1467 			p_link->fec_active = QED_FEC_MODE_FIRECODE;
1468 			break;
1469 		case LINK_STATUS_FEC_MODE_RS_CL91:
1470 			p_link->fec_active = QED_FEC_MODE_RS;
1471 			break;
1472 		default:
1473 			p_link->fec_active = QED_FEC_MODE_AUTO;
1474 		}
1475 	} else {
1476 		p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
1477 	}
1478 
1479 	qed_link_update(p_hwfn, p_ptt);
1480 out:
1481 	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
1482 }
1483 
1484 int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
1485 {
1486 	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
1487 	struct qed_mcp_mb_params mb_params;
1488 	struct eth_phy_cfg phy_cfg;
1489 	u32 cmd, fec_bit = 0;
1490 	u32 val, ext_speed;
1491 	int rc = 0;
1492 
1493 	/* Set the shmem configuration according to params */
1494 	memset(&phy_cfg, 0, sizeof(phy_cfg));
1495 	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
1496 	if (!params->speed.autoneg)
1497 		phy_cfg.speed = params->speed.forced_speed;
1498 	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
1499 	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
1500 	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
1501 	phy_cfg.adv_speed = params->speed.advertised_speeds;
1502 	phy_cfg.loopback_mode = params->loopback_mode;
1503 
1504 	/* There are MFWs that share this capability regardless of whether
1505 	 * this is feasible or not. And given that at the very least adv_caps
1506 	 * would be set internally by qed, we want to make sure LFA would
1507 	 * still work.
1508 	 */
1509 	if ((p_hwfn->mcp_info->capabilities &
1510 	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
1511 		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
1512 		if (params->eee.tx_lpi_enable)
1513 			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
1514 		if (params->eee.adv_caps & QED_EEE_1G_ADV)
1515 			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
1516 		if (params->eee.adv_caps & QED_EEE_10G_ADV)
1517 			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
1518 		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
1519 				    EEE_TX_TIMER_USEC_OFFSET) &
1520 				   EEE_TX_TIMER_USEC_MASK;
1521 	}
1522 
1523 	if (p_hwfn->mcp_info->capabilities &
1524 	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
1525 		if (params->fec & QED_FEC_MODE_NONE)
1526 			fec_bit |= FEC_FORCE_MODE_NONE;
1527 		else if (params->fec & QED_FEC_MODE_FIRECODE)
1528 			fec_bit |= FEC_FORCE_MODE_FIRECODE;
1529 		else if (params->fec & QED_FEC_MODE_RS)
1530 			fec_bit |= FEC_FORCE_MODE_RS;
1531 		else if (params->fec & QED_FEC_MODE_AUTO)
1532 			fec_bit |= FEC_FORCE_MODE_AUTO;
1533 
1534 		SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
1535 	}
1536 
1537 	if (p_hwfn->mcp_info->capabilities &
1538 	    FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
1539 		ext_speed = 0;
1540 		if (params->ext_speed.autoneg)
1541 			ext_speed |= ETH_EXT_SPEED_AN;
1542 
1543 		val = params->ext_speed.forced_speed;
1544 		if (val & QED_EXT_SPEED_1G)
1545 			ext_speed |= ETH_EXT_SPEED_1G;
1546 		if (val & QED_EXT_SPEED_10G)
1547 			ext_speed |= ETH_EXT_SPEED_10G;
1548 		if (val & QED_EXT_SPEED_20G)
1549 			ext_speed |= ETH_EXT_SPEED_20G;
1550 		if (val & QED_EXT_SPEED_25G)
1551 			ext_speed |= ETH_EXT_SPEED_25G;
1552 		if (val & QED_EXT_SPEED_40G)
1553 			ext_speed |= ETH_EXT_SPEED_40G;
1554 		if (val & QED_EXT_SPEED_50G_R)
1555 			ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
1556 		if (val & QED_EXT_SPEED_50G_R2)
1557 			ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
1558 		if (val & QED_EXT_SPEED_100G_R2)
1559 			ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
1560 		if (val & QED_EXT_SPEED_100G_R4)
1561 			ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
1562 		if (val & QED_EXT_SPEED_100G_P4)
1563 			ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;
1564 
1565 		SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
1566 			      ext_speed);
1567 
1568 		ext_speed = 0;
1569 
1570 		val = params->ext_speed.advertised_speeds;
1571 		if (val & QED_EXT_SPEED_MASK_1G)
1572 			ext_speed |= ETH_EXT_ADV_SPEED_1G;
1573 		if (val & QED_EXT_SPEED_MASK_10G)
1574 			ext_speed |= ETH_EXT_ADV_SPEED_10G;
1575 		if (val & QED_EXT_SPEED_MASK_20G)
1576 			ext_speed |= ETH_EXT_ADV_SPEED_20G;
1577 		if (val & QED_EXT_SPEED_MASK_25G)
1578 			ext_speed |= ETH_EXT_ADV_SPEED_25G;
1579 		if (val & QED_EXT_SPEED_MASK_40G)
1580 			ext_speed |= ETH_EXT_ADV_SPEED_40G;
1581 		if (val & QED_EXT_SPEED_MASK_50G_R)
1582 			ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
1583 		if (val & QED_EXT_SPEED_MASK_50G_R2)
1584 			ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
1585 		if (val & QED_EXT_SPEED_MASK_100G_R2)
1586 			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
1587 		if (val & QED_EXT_SPEED_MASK_100G_R4)
1588 			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
1589 		if (val & QED_EXT_SPEED_MASK_100G_P4)
1590 			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;
1591 
1592 		phy_cfg.extended_speed |= ext_speed;
1593 
1594 		SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
1595 			      params->ext_fec_mode);
1596 	}
1597 
1598 	p_hwfn->b_drv_link_init = b_up;
1599 
1600 	if (b_up) {
1601 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
1602 			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
1603 			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
1604 			   phy_cfg.loopback_mode, phy_cfg.fec_mode,
1605 			   phy_cfg.extended_speed);
1606 	} else {
1607 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");
1608 	}
1609 
1610 	memset(&mb_params, 0, sizeof(mb_params));
1611 	mb_params.cmd = cmd;
1612 	mb_params.p_data_src = &phy_cfg;
1613 	mb_params.data_src_size = sizeof(phy_cfg);
1614 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1615 
1616 	/* if mcp fails to respond we must abort */
1617 	if (rc) {
1618 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1619 		return rc;
1620 	}
1621 
1622 	/* Mimic link-change attention, done for several reasons:
1623 	 *  - On reset, there's no guarantee MFW would trigger
1624 	 *    an attention.
1625 	 *  - On initialization, older MFWs might not indicate link change
1626 	 *    during LFA, so we'll never get an UP indication.
1627 	 */
1628 	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
1629 
1630 	return 0;
1631 }
1632 
1633 u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
1634 				 struct qed_ptt *p_ptt)
1635 {
1636 	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
1637 
1638 	if (IS_VF(p_hwfn->cdev))
1639 		return -EINVAL;
1640 
1641 	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
1642 						 PUBLIC_PATH);
1643 	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
1644 	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
1645 
1646 	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
1647 			       path_addr +
1648 			       offsetof(struct public_path, process_kill)) &
1649 			PROCESS_KILL_COUNTER_MASK;
1650 
1651 	return proc_kill_cnt;
1652 }
1653 
1654 static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
1655 					struct qed_ptt *p_ptt)
1656 {
1657 	struct qed_dev *cdev = p_hwfn->cdev;
1658 	u32 proc_kill_cnt;
1659 
1660 	/* Prevent possible attentions/interrupts during the recovery handling
1661 	 * and till its load phase, during which they will be re-enabled.
1662 	 */
1663 	qed_int_igu_disable_int(p_hwfn, p_ptt);
1664 
1665 	DP_NOTICE(p_hwfn, "Received a process kill indication\n");
1666 
1667 	/* The following operations should be done once, and thus in CMT mode
1668 	 * are carried out by only the first HW function.
1669 	 */
1670 	if (p_hwfn != QED_LEADING_HWFN(cdev))
1671 		return;
1672 
1673 	if (cdev->recov_in_prog) {
1674 		DP_NOTICE(p_hwfn,
1675 			  "Ignoring the indication since a recovery process is already in progress\n");
1676 		return;
1677 	}
1678 
1679 	cdev->recov_in_prog = true;
1680 
1681 	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
1682 	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
1683 
1684 	qed_schedule_recovery_handler(p_hwfn);
1685 }
1686 
1687 static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
1688 					struct qed_ptt *p_ptt,
1689 					enum MFW_DRV_MSG_TYPE type)
1690 {
1691 	enum qed_mcp_protocol_type stats_type;
1692 	union qed_mcp_protocol_stats stats;
1693 	struct qed_mcp_mb_params mb_params;
1694 	u32 hsi_param;
1695 
1696 	switch (type) {
1697 	case MFW_DRV_MSG_GET_LAN_STATS:
1698 		stats_type = QED_MCP_LAN_STATS;
1699 		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
1700 		break;
1701 	case MFW_DRV_MSG_GET_FCOE_STATS:
1702 		stats_type = QED_MCP_FCOE_STATS;
1703 		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
1704 		break;
1705 	case MFW_DRV_MSG_GET_ISCSI_STATS:
1706 		stats_type = QED_MCP_ISCSI_STATS;
1707 		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
1708 		break;
1709 	case MFW_DRV_MSG_GET_RDMA_STATS:
1710 		stats_type = QED_MCP_RDMA_STATS;
1711 		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
1712 		break;
1713 	default:
1714 		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
1715 		return;
1716 	}
1717 
1718 	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
1719 
1720 	memset(&mb_params, 0, sizeof(mb_params));
1721 	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
1722 	mb_params.param = hsi_param;
1723 	mb_params.p_data_src = &stats;
1724 	mb_params.data_src_size = sizeof(stats);
1725 	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1726 }
1727 
1728 static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1729 {
1730 	struct qed_mcp_function_info *p_info;
1731 	struct public_func shmem_info;
1732 	u32 resp = 0, param = 0;
1733 
1734 	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1735 
1736 	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
1737 
1738 	p_info = &p_hwfn->mcp_info->func_info;
1739 
1740 	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
1741 	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
1742 
1743 	/* Acknowledge the MFW */
1744 	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
1745 		    &param);
1746 }
1747 
1748 static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1749 {
1750 	struct public_func shmem_info;
1751 	u32 resp = 0, param = 0;
1752 
1753 	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1754 
1755 	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1756 						 FUNC_MF_CFG_OV_STAG_MASK;
1757 	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1758 	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1759 		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1760 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1761 			       p_hwfn->hw_info.ovlan);
1762 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1763 
1764 			/* Configure DB to add external vlan to EDPM packets */
1765 			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1766 			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1767 			       p_hwfn->hw_info.ovlan);
1768 		} else {
1769 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1770 			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1771 			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1772 			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1773 		}
1774 
1775 		qed_sp_pf_update_stag(p_hwfn);
1776 	}
1777 
1778 	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1779 		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1780 
1781 	/* Acknowledge the MFW */
1782 	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1783 		    &resp, &param);
1784 }
1785 
1786 static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
1787 				       struct qed_ptt *p_ptt)
1788 {
1789 	/* A single notification should be sent to upper driver in CMT mode */
1790 	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
1791 		return;
1792 
1793 	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
1794 			  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
1795 }
1796 
1797 struct qed_mdump_cmd_params {
1798 	u32 cmd;
1799 	void *p_data_src;
1800 	u8 data_src_size;
1801 	void *p_data_dst;
1802 	u8 data_dst_size;
1803 	u32 mcp_resp;
1804 };
1805 
1806 static int
1807 qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
1808 		  struct qed_ptt *p_ptt,
1809 		  struct qed_mdump_cmd_params *p_mdump_cmd_params)
1810 {
1811 	struct qed_mcp_mb_params mb_params;
1812 	int rc;
1813 
1814 	memset(&mb_params, 0, sizeof(mb_params));
1815 	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
1816 	mb_params.param = p_mdump_cmd_params->cmd;
1817 	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
1818 	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
1819 	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
1820 	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
1821 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
1822 	if (rc)
1823 		return rc;
1824 
1825 	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
1826 
1827 	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
1828 		DP_INFO(p_hwfn,
1829 			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1830 			p_mdump_cmd_params->cmd);
1831 		rc = -EOPNOTSUPP;
1832 	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
1833 		DP_INFO(p_hwfn,
1834 			"The mdump command is not supported by the MFW\n");
1835 		rc = -EOPNOTSUPP;
1836 	}
1837 
1838 	return rc;
1839 }
1840 
1841 static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1842 {
1843 	struct qed_mdump_cmd_params mdump_cmd_params;
1844 
1845 	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
1846 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
1847 
1848 	return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1849 }
1850 
1851 int
1852 qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
1853 			 struct qed_ptt *p_ptt,
1854 			 struct mdump_retain_data_stc *p_mdump_retain)
1855 {
1856 	struct qed_mdump_cmd_params mdump_cmd_params;
1857 	int rc;
1858 
1859 	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
1860 	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
1861 	mdump_cmd_params.p_data_dst = p_mdump_retain;
1862 	mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
1863 
1864 	rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
1865 	if (rc)
1866 		return rc;
1867 
1868 	if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
1869 		DP_INFO(p_hwfn,
1870 			"Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1871 			mdump_cmd_params.mcp_resp);
1872 		return -EINVAL;
1873 	}
1874 
1875 	return 0;
1876 }
1877 
1878 static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
1879 					  struct qed_ptt *p_ptt)
1880 {
1881 	struct mdump_retain_data_stc mdump_retain;
1882 	int rc;
1883 
1884 	/* In CMT mode - no need for more than a single acknowledgment to the
1885 	 * MFW, and no more than a single notification to the upper driver.
1886 	 */
1887 	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
1888 		return;
1889 
1890 	rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
1891 	if (rc == 0 && mdump_retain.valid)
1892 		DP_NOTICE(p_hwfn,
1893 			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1894 			  mdump_retain.epoch,
1895 			  mdump_retain.pf, mdump_retain.status);
1896 	else
1897 		DP_NOTICE(p_hwfn,
1898 			  "The MFW notified that a critical error occurred in the device\n");
1899 
1900 	DP_NOTICE(p_hwfn,
1901 		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1902 	qed_mcp_mdump_ack(p_hwfn, p_ptt);
1903 
1904 	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
1905 }
1906 
1907 void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1908 {
1909 	struct public_func shmem_info;
1910 	u32 port_cfg, val;
1911 
1912 	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
1913 		return;
1914 
1915 	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
1916 	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1917 			  offsetof(struct public_port, oem_cfg_port));
1918 	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
1919 		OEM_CFG_CHANNEL_TYPE_OFFSET;
1920 	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
1921 		DP_NOTICE(p_hwfn,
1922 			  "Incorrect UFP Channel type  %d port_id 0x%02x\n",
1923 			  val, MFW_PORT(p_hwfn));
1924 
1925 	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
1926 	if (val == OEM_CFG_SCHED_TYPE_ETS) {
1927 		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
1928 	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
1929 		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
1930 	} else {
1931 		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
1932 		DP_NOTICE(p_hwfn,
1933 			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
1934 			  val, MFW_PORT(p_hwfn));
1935 	}
1936 
1937 	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
1938 	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
1939 		OEM_CFG_FUNC_TC_OFFSET;
1940 	p_hwfn->ufp_info.tc = (u8)val;
1941 	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
1942 		OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
1943 	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
1944 		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
1945 	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
1946 		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
1947 	} else {
1948 		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
1949 		DP_NOTICE(p_hwfn,
1950 			  "Unknown Host priority control %d port_id 0x%02x\n",
1951 			  val, MFW_PORT(p_hwfn));
1952 	}
1953 
1954 	DP_NOTICE(p_hwfn,
1955 		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
1956 		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
1957 		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
1958 }
1959 
1960 static int
1961 qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1962 {
1963 	qed_mcp_read_ufp_config(p_hwfn, p_ptt);
1964 
1965 	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
1966 		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
1967 		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
1968 					   p_hwfn->ufp_info.tc);
1969 
1970 		qed_qm_reconf(p_hwfn, p_ptt);
1971 	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
1972 		/* Merge UFP TC with the dcbx TC data */
1973 		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
1974 					  QED_DCBX_OPERATIONAL_MIB);
1975 	} else {
1976 		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
1977 		return -EINVAL;
1978 	}
1979 
1980 	/* update storm FW with negotiation results */
1981 	qed_sp_pf_update_ufp(p_hwfn);
1982 
1983 	/* update stag pcp value */
1984 	qed_sp_pf_update_stag(p_hwfn);
1985 
1986 	return 0;
1987 }
1988 
1989 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
1990 			  struct qed_ptt *p_ptt)
1991 {
1992 	struct qed_mcp_info *info = p_hwfn->mcp_info;
1993 	int rc = 0;
1994 	bool found = false;
1995 	u16 i;
1996 
1997 	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
1998 
1999 	/* Read Messages from MFW */
2000 	qed_mcp_read_mb(p_hwfn, p_ptt);
2001 
2002 	/* Compare current messages to old ones */
2003 	for (i = 0; i < info->mfw_mb_length; i++) {
2004 		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
2005 			continue;
2006 
2007 		found = true;
2008 
2009 		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
2010 			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
2011 			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
2012 
2013 		switch (i) {
2014 		case MFW_DRV_MSG_LINK_CHANGE:
2015 			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
2016 			break;
2017 		case MFW_DRV_MSG_VF_DISABLED:
2018 			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
2019 			break;
2020 		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
2021 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2022 						  QED_DCBX_REMOTE_LLDP_MIB);
2023 			break;
2024 		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
2025 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2026 						  QED_DCBX_REMOTE_MIB);
2027 			break;
2028 		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
2029 			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
2030 						  QED_DCBX_OPERATIONAL_MIB);
2031 			break;
2032 		case MFW_DRV_MSG_OEM_CFG_UPDATE:
2033 			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
2034 			break;
2035 		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
2036 			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
2037 			break;
2038 		case MFW_DRV_MSG_ERROR_RECOVERY:
2039 			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
2040 			break;
2041 		case MFW_DRV_MSG_GET_LAN_STATS:
2042 		case MFW_DRV_MSG_GET_FCOE_STATS:
2043 		case MFW_DRV_MSG_GET_ISCSI_STATS:
2044 		case MFW_DRV_MSG_GET_RDMA_STATS:
2045 			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
2046 			break;
2047 		case MFW_DRV_MSG_BW_UPDATE:
2048 			qed_mcp_update_bw(p_hwfn, p_ptt);
2049 			break;
2050 		case MFW_DRV_MSG_S_TAG_UPDATE:
2051 			qed_mcp_update_stag(p_hwfn, p_ptt);
2052 			break;
2053 		case MFW_DRV_MSG_FAILURE_DETECTED:
2054 			qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
2055 			break;
2056 		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
2057 			qed_mcp_handle_critical_error(p_hwfn, p_ptt);
2058 			break;
2059 		case MFW_DRV_MSG_GET_TLV_REQ:
2060 			qed_mfw_tlv_req(p_hwfn);
2061 			break;
2062 		default:
2063 			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
2064 			rc = -EINVAL;
2065 		}
2066 	}
2067 
2068 	/* ACK everything */
2069 	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
2070 		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
2071 
2072 		/* MFW expect answer in BE, so we force write in that format */
2073 		qed_wr(p_hwfn, p_ptt,
2074 		       info->mfw_mb_addr + sizeof(u32) +
2075 		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
2076 		       sizeof(u32) + i * sizeof(u32),
2077 		       (__force u32)val);
2078 	}
2079 
2080 	if (!found) {
2081 		DP_NOTICE(p_hwfn,
2082 			  "Received an MFW message indication but no new message!\n");
2083 		rc = -EINVAL;
2084 	}
2085 
2086 	/* Copy the new mfw messages into the shadow */
2087 	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
2088 
2089 	return rc;
2090 }
2091 
2092 int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
2093 			struct qed_ptt *p_ptt,
2094 			u32 *p_mfw_ver, u32 *p_running_bundle_id)
2095 {
2096 	u32 global_offsize;
2097 
2098 	if (IS_VF(p_hwfn->cdev)) {
2099 		if (p_hwfn->vf_iov_info) {
2100 			struct pfvf_acquire_resp_tlv *p_resp;
2101 
2102 			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
2103 			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
2104 			return 0;
2105 		} else {
2106 			DP_VERBOSE(p_hwfn,
2107 				   QED_MSG_IOV,
2108 				   "VF requested MFW version prior to ACQUIRE\n");
2109 			return -EINVAL;
2110 		}
2111 	}
2112 
2113 	global_offsize = qed_rd(p_hwfn, p_ptt,
2114 				SECTION_OFFSIZE_ADDR(p_hwfn->
2115 						     mcp_info->public_base,
2116 						     PUBLIC_GLOBAL));
2117 	*p_mfw_ver =
2118 	    qed_rd(p_hwfn, p_ptt,
2119 		   SECTION_ADDR(global_offsize,
2120 				0) + offsetof(struct public_global, mfw_ver));
2121 
2122 	if (p_running_bundle_id != NULL) {
2123 		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
2124 					      SECTION_ADDR(global_offsize, 0) +
2125 					      offsetof(struct public_global,
2126 						       running_bundle_id));
2127 	}
2128 
2129 	return 0;
2130 }
2131 
2132 int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
2133 			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
2134 {
2135 	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
2136 
2137 	if (IS_VF(p_hwfn->cdev))
2138 		return -EINVAL;
2139 
2140 	/* Read the address of the nvm_cfg */
2141 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2142 	if (!nvm_cfg_addr) {
2143 		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
2144 		return -EINVAL;
2145 	}
2146 
2147 	/* Read the offset of nvm_cfg1 */
2148 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2149 
2150 	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2151 		       offsetof(struct nvm_cfg1, glob) +
2152 		       offsetof(struct nvm_cfg1_glob, mbi_version);
2153 	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
2154 			    mbi_ver_addr) &
2155 		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
2156 		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
2157 		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
2158 
2159 	return 0;
2160 }
2161 
2162 int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
2163 			   struct qed_ptt *p_ptt, u32 *p_media_type)
2164 {
2165 	*p_media_type = MEDIA_UNSPECIFIED;
2166 
2167 	if (IS_VF(p_hwfn->cdev))
2168 		return -EINVAL;
2169 
2170 	if (!qed_mcp_is_init(p_hwfn)) {
2171 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2172 		return -EBUSY;
2173 	}
2174 
2175 	if (!p_ptt) {
2176 		*p_media_type = MEDIA_UNSPECIFIED;
2177 		return -EINVAL;
2178 	}
2179 
2180 	*p_media_type = qed_rd(p_hwfn, p_ptt,
2181 			       p_hwfn->mcp_info->port_addr +
2182 			       offsetof(struct public_port,
2183 					media_type));
2184 
2185 	return 0;
2186 }
2187 
2188 int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
2189 				 struct qed_ptt *p_ptt,
2190 				 u32 *p_transceiver_state,
2191 				 u32 *p_transceiver_type)
2192 {
2193 	u32 transceiver_info;
2194 
2195 	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
2196 	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
2197 
2198 	if (IS_VF(p_hwfn->cdev))
2199 		return -EINVAL;
2200 
2201 	if (!qed_mcp_is_init(p_hwfn)) {
2202 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2203 		return -EBUSY;
2204 	}
2205 
2206 	transceiver_info = qed_rd(p_hwfn, p_ptt,
2207 				  p_hwfn->mcp_info->port_addr +
2208 				  offsetof(struct public_port,
2209 					   transceiver_data));
2210 
2211 	*p_transceiver_state = (transceiver_info &
2212 				ETH_TRANSCEIVER_STATE_MASK) >>
2213 				ETH_TRANSCEIVER_STATE_OFFSET;
2214 
2215 	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
2216 		*p_transceiver_type = (transceiver_info &
2217 				       ETH_TRANSCEIVER_TYPE_MASK) >>
2218 				       ETH_TRANSCEIVER_TYPE_OFFSET;
2219 	else
2220 		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
2221 
2222 	return 0;
2223 }
2224 static bool qed_is_transceiver_ready(u32 transceiver_state,
2225 				     u32 transceiver_type)
2226 {
2227 	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
2228 	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
2229 	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
2230 		return true;
2231 
2232 	return false;
2233 }
2234 
2235 int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
2236 			     struct qed_ptt *p_ptt, u32 *p_speed_mask)
2237 {
2238 	u32 transceiver_type, transceiver_state;
2239 	int ret;
2240 
2241 	ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
2242 					   &transceiver_type);
2243 	if (ret)
2244 		return ret;
2245 
2246 	if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
2247 				     false)
2248 		return -EINVAL;
2249 
2250 	switch (transceiver_type) {
2251 	case ETH_TRANSCEIVER_TYPE_1G_LX:
2252 	case ETH_TRANSCEIVER_TYPE_1G_SX:
2253 	case ETH_TRANSCEIVER_TYPE_1G_PCC:
2254 	case ETH_TRANSCEIVER_TYPE_1G_ACC:
2255 	case ETH_TRANSCEIVER_TYPE_1000BASET:
2256 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2257 		break;
2258 	case ETH_TRANSCEIVER_TYPE_10G_SR:
2259 	case ETH_TRANSCEIVER_TYPE_10G_LR:
2260 	case ETH_TRANSCEIVER_TYPE_10G_LRM:
2261 	case ETH_TRANSCEIVER_TYPE_10G_ER:
2262 	case ETH_TRANSCEIVER_TYPE_10G_PCC:
2263 	case ETH_TRANSCEIVER_TYPE_10G_ACC:
2264 	case ETH_TRANSCEIVER_TYPE_4x10G:
2265 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2266 		break;
2267 	case ETH_TRANSCEIVER_TYPE_40G_LR4:
2268 	case ETH_TRANSCEIVER_TYPE_40G_SR4:
2269 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
2270 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
2271 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2272 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2273 		break;
2274 	case ETH_TRANSCEIVER_TYPE_100G_AOC:
2275 	case ETH_TRANSCEIVER_TYPE_100G_SR4:
2276 	case ETH_TRANSCEIVER_TYPE_100G_LR4:
2277 	case ETH_TRANSCEIVER_TYPE_100G_ER4:
2278 	case ETH_TRANSCEIVER_TYPE_100G_ACC:
2279 		*p_speed_mask =
2280 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2281 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2282 		break;
2283 	case ETH_TRANSCEIVER_TYPE_25G_SR:
2284 	case ETH_TRANSCEIVER_TYPE_25G_LR:
2285 	case ETH_TRANSCEIVER_TYPE_25G_AOC:
2286 	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
2287 	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
2288 	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
2289 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
2290 		break;
2291 	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
2292 	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
2293 	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
2294 	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
2295 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2296 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2297 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2298 		break;
2299 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
2300 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
2301 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2302 				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2303 		break;
2304 	case ETH_TRANSCEIVER_TYPE_40G_CR4:
2305 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
2306 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2307 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2308 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2309 		break;
2310 	case ETH_TRANSCEIVER_TYPE_100G_CR4:
2311 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
2312 		*p_speed_mask =
2313 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2314 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
2315 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2316 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2317 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
2318 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2319 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2320 		break;
2321 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
2322 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
2323 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
2324 		*p_speed_mask =
2325 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
2326 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
2327 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
2328 		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2329 		break;
2330 	case ETH_TRANSCEIVER_TYPE_XLPPI:
2331 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
2332 		break;
2333 	case ETH_TRANSCEIVER_TYPE_10G_BASET:
2334 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
2335 	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
2336 		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
2337 				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
2338 		break;
2339 	default:
2340 		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
2341 			transceiver_type);
2342 		*p_speed_mask = 0xff;
2343 		break;
2344 	}
2345 
2346 	return 0;
2347 }
2348 
2349 int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
2350 			     struct qed_ptt *p_ptt, u32 *p_board_config)
2351 {
2352 	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
2353 
2354 	if (IS_VF(p_hwfn->cdev))
2355 		return -EINVAL;
2356 
2357 	if (!qed_mcp_is_init(p_hwfn)) {
2358 		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
2359 		return -EBUSY;
2360 	}
2361 	if (!p_ptt) {
2362 		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
2363 		return -EINVAL;
2364 	}
2365 
2366 	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
2367 	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
2368 	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2369 			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
2370 	*p_board_config = qed_rd(p_hwfn, p_ptt,
2371 				 port_cfg_addr +
2372 				 offsetof(struct nvm_cfg1_port,
2373 					  board_cfg));
2374 
2375 	return 0;
2376 }
2377 
2378 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2379 static void
2380 qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
2381 			       enum qed_pci_personality *p_proto)
2382 {
2383 	/* There wasn't ever a legacy MFW that published iwarp.
2384 	 * So at this point, this is either plain l2 or RoCE.
2385 	 */
2386 	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
2387 		*p_proto = QED_PCI_ETH_ROCE;
2388 	else
2389 		*p_proto = QED_PCI_ETH;
2390 
2391 	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2392 		   "According to Legacy capabilities, L2 personality is %08x\n",
2393 		   (u32) *p_proto);
2394 }
2395 
2396 static int
2397 qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
2398 			    struct qed_ptt *p_ptt,
2399 			    enum qed_pci_personality *p_proto)
2400 {
2401 	u32 resp = 0, param = 0;
2402 	int rc;
2403 
2404 	rc = qed_mcp_cmd(p_hwfn, p_ptt,
2405 			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
2406 	if (rc)
2407 		return rc;
2408 	if (resp != FW_MSG_CODE_OK) {
2409 		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
2410 			   "MFW lacks support for command; Returns %08x\n",
2411 			   resp);
2412 		return -EINVAL;
2413 	}
2414 
2415 	switch (param) {
2416 	case FW_MB_PARAM_GET_PF_RDMA_NONE:
2417 		*p_proto = QED_PCI_ETH;
2418 		break;
2419 	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
2420 		*p_proto = QED_PCI_ETH_ROCE;
2421 		break;
2422 	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
2423 		*p_proto = QED_PCI_ETH_IWARP;
2424 		break;
2425 	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
2426 		*p_proto = QED_PCI_ETH_RDMA;
2427 		break;
2428 	default:
2429 		DP_NOTICE(p_hwfn,
2430 			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2431 			  param);
2432 		return -EINVAL;
2433 	}
2434 
2435 	DP_VERBOSE(p_hwfn,
2436 		   NETIF_MSG_IFUP,
2437 		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2438 		   (u32) *p_proto, resp, param);
2439 	return 0;
2440 }
2441 
2442 static int
2443 qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
2444 			struct public_func *p_info,
2445 			struct qed_ptt *p_ptt,
2446 			enum qed_pci_personality *p_proto)
2447 {
2448 	int rc = 0;
2449 
2450 	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
2451 	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
2452 		if (!IS_ENABLED(CONFIG_QED_RDMA))
2453 			*p_proto = QED_PCI_ETH;
2454 		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
2455 			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
2456 		break;
2457 	case FUNC_MF_CFG_PROTOCOL_ISCSI:
2458 		*p_proto = QED_PCI_ISCSI;
2459 		break;
2460 	case FUNC_MF_CFG_PROTOCOL_FCOE:
2461 		*p_proto = QED_PCI_FCOE;
2462 		break;
2463 	case FUNC_MF_CFG_PROTOCOL_ROCE:
2464 		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
2465 		fallthrough;
2466 	default:
2467 		rc = -EINVAL;
2468 	}
2469 
2470 	return rc;
2471 }
2472 
2473 int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
2474 				 struct qed_ptt *p_ptt)
2475 {
2476 	struct qed_mcp_function_info *info;
2477 	struct public_func shmem_info;
2478 
2479 	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
2480 	info = &p_hwfn->mcp_info->func_info;
2481 
2482 	info->pause_on_host = (shmem_info.config &
2483 			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
2484 
2485 	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
2486 				    &info->protocol)) {
2487 		DP_ERR(p_hwfn, "Unknown personality %08x\n",
2488 		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
2489 		return -EINVAL;
2490 	}
2491 
2492 	qed_read_pf_bandwidth(p_hwfn, &shmem_info);
2493 
2494 	if (shmem_info.mac_upper || shmem_info.mac_lower) {
2495 		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
2496 		info->mac[1] = (u8)(shmem_info.mac_upper);
2497 		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
2498 		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
2499 		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
2500 		info->mac[5] = (u8)(shmem_info.mac_lower);
2501 
2502 		/* Store primary MAC for later possible WoL */
2503 		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
2504 	} else {
2505 		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
2506 	}
2507 
2508 	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
2509 			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
2510 	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
2511 			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
2512 
2513 	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
2514 
2515 	info->mtu = (u16)shmem_info.mtu_size;
2516 
2517 	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
2518 	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
2519 	if (qed_mcp_is_init(p_hwfn)) {
2520 		u32 resp = 0, param = 0;
2521 		int rc;
2522 
2523 		rc = qed_mcp_cmd(p_hwfn, p_ptt,
2524 				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
2525 		if (rc)
2526 			return rc;
2527 		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
2528 			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
2529 	}
2530 
2531 	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
2532 		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
2533 		info->pause_on_host, info->protocol,
2534 		info->bandwidth_min, info->bandwidth_max,
2535 		info->mac,
2536 		info->wwn_port, info->wwn_node,
2537 		info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
2538 
2539 	return 0;
2540 }
2541 
2542 struct qed_mcp_link_params
2543 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
2544 {
2545 	if (!p_hwfn || !p_hwfn->mcp_info)
2546 		return NULL;
2547 	return &p_hwfn->mcp_info->link_input;
2548 }
2549 
2550 struct qed_mcp_link_state
2551 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
2552 {
2553 	if (!p_hwfn || !p_hwfn->mcp_info)
2554 		return NULL;
2555 	return &p_hwfn->mcp_info->link_output;
2556 }
2557 
2558 struct qed_mcp_link_capabilities
2559 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
2560 {
2561 	if (!p_hwfn || !p_hwfn->mcp_info)
2562 		return NULL;
2563 	return &p_hwfn->mcp_info->link_capabilities;
2564 }
2565 
2566 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2567 {
2568 	u32 resp = 0, param = 0;
2569 	int rc;
2570 
2571 	rc = qed_mcp_cmd(p_hwfn, p_ptt,
2572 			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
2573 
2574 	/* Wait for the drain to complete before returning */
2575 	msleep(1020);
2576 
2577 	return rc;
2578 }
2579 
2580 int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
2581 			   struct qed_ptt *p_ptt, u32 *p_flash_size)
2582 {
2583 	u32 flash_size;
2584 
2585 	if (IS_VF(p_hwfn->cdev))
2586 		return -EINVAL;
2587 
2588 	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
2589 	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
2590 		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
2591 	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
2592 
2593 	*p_flash_size = flash_size;
2594 
2595 	return 0;
2596 }
2597 
2598 int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2599 {
2600 	struct qed_dev *cdev = p_hwfn->cdev;
2601 
2602 	if (cdev->recov_in_prog) {
2603 		DP_NOTICE(p_hwfn,
2604 			  "Avoid triggering a recovery since such a process is already in progress\n");
2605 		return -EAGAIN;
2606 	}
2607 
2608 	DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
2609 	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
2610 
2611 	return 0;
2612 }
2613 
2614 #define QED_RECOVERY_PROLOG_SLEEP_MS    100
2615 
2616 int qed_recovery_prolog(struct qed_dev *cdev)
2617 {
2618 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2619 	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
2620 	int rc;
2621 
2622 	/* Allow ongoing PCIe transactions to complete */
2623 	msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
2624 
2625 	/* Clear the PF's internal FID_enable in the PXP */
2626 	rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
2627 	if (rc)
2628 		DP_NOTICE(p_hwfn,
2629 			  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
2630 			  rc);
2631 
2632 	return rc;
2633 }
2634 
2635 static int
2636 qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
2637 			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2638 {
2639 	u32 resp = 0, param = 0, rc_param = 0;
2640 	int rc;
2641 
2642 	/* Only Leader can configure MSIX, and need to take CMT into account */
2643 	if (!IS_LEAD_HWFN(p_hwfn))
2644 		return 0;
2645 	num *= p_hwfn->cdev->num_hwfns;
2646 
2647 	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
2648 		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
2649 	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
2650 		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
2651 
2652 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
2653 			 &resp, &rc_param);
2654 
2655 	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
2656 		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
2657 		rc = -EINVAL;
2658 	} else {
2659 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2660 			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2661 			   num, vf_id);
2662 	}
2663 
2664 	return rc;
2665 }
2666 
2667 static int
2668 qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
2669 			  struct qed_ptt *p_ptt, u8 num)
2670 {
2671 	u32 resp = 0, param = num, rc_param = 0;
2672 	int rc;
2673 
2674 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
2675 			 param, &resp, &rc_param);
2676 
2677 	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
2678 		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
2679 		rc = -EINVAL;
2680 	} else {
2681 		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2682 			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
2683 	}
2684 
2685 	return rc;
2686 }
2687 
2688 int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
2689 			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
2690 {
2691 	if (QED_IS_BB(p_hwfn->cdev))
2692 		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
2693 	else
2694 		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
2695 }
2696 
2697 int
2698 qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
2699 			 struct qed_ptt *p_ptt,
2700 			 struct qed_mcp_drv_version *p_ver)
2701 {
2702 	struct qed_mcp_mb_params mb_params;
2703 	struct drv_version_stc drv_version;
2704 	__be32 val;
2705 	u32 i;
2706 	int rc;
2707 
2708 	memset(&drv_version, 0, sizeof(drv_version));
2709 	drv_version.version = p_ver->version;
2710 	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
2711 		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
2712 		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
2713 	}
2714 
2715 	memset(&mb_params, 0, sizeof(mb_params));
2716 	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
2717 	mb_params.p_data_src = &drv_version;
2718 	mb_params.data_src_size = sizeof(drv_version);
2719 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2720 	if (rc)
2721 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2722 
2723 	return rc;
2724 }
2725 
2726 /* A maximal 100 msec waiting time for the MCP to halt */
2727 #define QED_MCP_HALT_SLEEP_MS		10
2728 #define QED_MCP_HALT_MAX_RETRIES	10
2729 
2730 int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2731 {
2732 	u32 resp = 0, param = 0, cpu_state, cnt = 0;
2733 	int rc;
2734 
2735 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
2736 			 &param);
2737 	if (rc) {
2738 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2739 		return rc;
2740 	}
2741 
2742 	do {
2743 		msleep(QED_MCP_HALT_SLEEP_MS);
2744 		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2745 		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
2746 			break;
2747 	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);
2748 
2749 	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
2750 		DP_NOTICE(p_hwfn,
2751 			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2752 			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
2753 		return -EBUSY;
2754 	}
2755 
2756 	qed_mcp_cmd_set_blocking(p_hwfn, true);
2757 
2758 	return 0;
2759 }
2760 
2761 #define QED_MCP_RESUME_SLEEP_MS	10
2762 
2763 int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
2764 {
2765 	u32 cpu_mode, cpu_state;
2766 
2767 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
2768 
2769 	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
2770 	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
2771 	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
2772 	msleep(QED_MCP_RESUME_SLEEP_MS);
2773 	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
2774 
2775 	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
2776 		DP_NOTICE(p_hwfn,
2777 			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2778 			  cpu_mode, cpu_state);
2779 		return -EBUSY;
2780 	}
2781 
2782 	qed_mcp_cmd_set_blocking(p_hwfn, false);
2783 
2784 	return 0;
2785 }
2786 
2787 int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
2788 				     struct qed_ptt *p_ptt,
2789 				     enum qed_ov_client client)
2790 {
2791 	u32 resp = 0, param = 0;
2792 	u32 drv_mb_param;
2793 	int rc;
2794 
2795 	switch (client) {
2796 	case QED_OV_CLIENT_DRV:
2797 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
2798 		break;
2799 	case QED_OV_CLIENT_USER:
2800 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
2801 		break;
2802 	case QED_OV_CLIENT_VENDOR_SPEC:
2803 		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
2804 		break;
2805 	default:
2806 		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
2807 		return -EINVAL;
2808 	}
2809 
2810 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
2811 			 drv_mb_param, &resp, &param);
2812 	if (rc)
2813 		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
2814 
2815 	return rc;
2816 }
2817 
2818 int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
2819 				   struct qed_ptt *p_ptt,
2820 				   enum qed_ov_driver_state drv_state)
2821 {
2822 	u32 resp = 0, param = 0;
2823 	u32 drv_mb_param;
2824 	int rc;
2825 
2826 	switch (drv_state) {
2827 	case QED_OV_DRIVER_STATE_NOT_LOADED:
2828 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
2829 		break;
2830 	case QED_OV_DRIVER_STATE_DISABLED:
2831 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
2832 		break;
2833 	case QED_OV_DRIVER_STATE_ACTIVE:
2834 		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
2835 		break;
2836 	default:
2837 		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
2838 		return -EINVAL;
2839 	}
2840 
2841 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
2842 			 drv_mb_param, &resp, &param);
2843 	if (rc)
2844 		DP_ERR(p_hwfn, "Failed to send driver state\n");
2845 
2846 	return rc;
2847 }
2848 
2849 int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
2850 			  struct qed_ptt *p_ptt, u16 mtu)
2851 {
2852 	u32 resp = 0, param = 0;
2853 	u32 drv_mb_param;
2854 	int rc;
2855 
2856 	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
2857 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
2858 			 drv_mb_param, &resp, &param);
2859 	if (rc)
2860 		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
2861 
2862 	return rc;
2863 }
2864 
2865 int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
2866 			  struct qed_ptt *p_ptt, u8 *mac)
2867 {
2868 	struct qed_mcp_mb_params mb_params;
2869 	u32 mfw_mac[2];
2870 	int rc;
2871 
2872 	memset(&mb_params, 0, sizeof(mb_params));
2873 	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
2874 	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
2875 			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
2876 	mb_params.param |= MCP_PF_ID(p_hwfn);
2877 
2878 	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
2879 	 * in 32-bit granularity.
2880 	 * So the MAC has to be set in native order [and not byte order],
2881 	 * otherwise it would be read incorrectly by MFW after swap.
2882 	 */
2883 	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
2884 	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
2885 
2886 	mb_params.p_data_src = (u8 *)mfw_mac;
2887 	mb_params.data_src_size = 8;
2888 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
2889 	if (rc)
2890 		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
2891 
2892 	/* Store primary MAC for later possible WoL */
2893 	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
2894 
2895 	return rc;
2896 }
2897 
2898 int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
2899 			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
2900 {
2901 	u32 resp = 0, param = 0;
2902 	u32 drv_mb_param;
2903 	int rc;
2904 
2905 	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
2906 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
2907 			   "Can't change WoL configuration when WoL isn't supported\n");
2908 		return -EINVAL;
2909 	}
2910 
2911 	switch (wol) {
2912 	case QED_OV_WOL_DEFAULT:
2913 		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
2914 		break;
2915 	case QED_OV_WOL_DISABLED:
2916 		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
2917 		break;
2918 	case QED_OV_WOL_ENABLED:
2919 		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
2920 		break;
2921 	default:
2922 		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
2923 		return -EINVAL;
2924 	}
2925 
2926 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
2927 			 drv_mb_param, &resp, &param);
2928 	if (rc)
2929 		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
2930 
2931 	/* Store the WoL update for a future unload */
2932 	p_hwfn->cdev->wol_config = (u8)wol;
2933 
2934 	return rc;
2935 }
2936 
2937 int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
2938 			      struct qed_ptt *p_ptt,
2939 			      enum qed_ov_eswitch eswitch)
2940 {
2941 	u32 resp = 0, param = 0;
2942 	u32 drv_mb_param;
2943 	int rc;
2944 
2945 	switch (eswitch) {
2946 	case QED_OV_ESWITCH_NONE:
2947 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
2948 		break;
2949 	case QED_OV_ESWITCH_VEB:
2950 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
2951 		break;
2952 	case QED_OV_ESWITCH_VEPA:
2953 		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
2954 		break;
2955 	default:
2956 		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
2957 		return -EINVAL;
2958 	}
2959 
2960 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
2961 			 drv_mb_param, &resp, &param);
2962 	if (rc)
2963 		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
2964 
2965 	return rc;
2966 }
2967 
2968 int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
2969 		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
2970 {
2971 	u32 resp = 0, param = 0, drv_mb_param;
2972 	int rc;
2973 
2974 	switch (mode) {
2975 	case QED_LED_MODE_ON:
2976 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
2977 		break;
2978 	case QED_LED_MODE_OFF:
2979 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
2980 		break;
2981 	case QED_LED_MODE_RESTORE:
2982 		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
2983 		break;
2984 	default:
2985 		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
2986 		return -EINVAL;
2987 	}
2988 
2989 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
2990 			 drv_mb_param, &resp, &param);
2991 
2992 	return rc;
2993 }
2994 
2995 int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
2996 			  struct qed_ptt *p_ptt, u32 mask_parities)
2997 {
2998 	u32 resp = 0, param = 0;
2999 	int rc;
3000 
3001 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
3002 			 mask_parities, &resp, &param);
3003 
3004 	if (rc) {
3005 		DP_ERR(p_hwfn,
3006 		       "MCP response failure for mask parities, aborting\n");
3007 	} else if (resp != FW_MSG_CODE_OK) {
3008 		DP_ERR(p_hwfn,
3009 		       "MCP did not acknowledge mask parity request. Old MFW?\n");
3010 		rc = -EINVAL;
3011 	}
3012 
3013 	return rc;
3014 }
3015 
3016 int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
3017 {
3018 	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
3019 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3020 	u32 resp = 0, resp_param = 0;
3021 	struct qed_ptt *p_ptt;
3022 	int rc = 0;
3023 
3024 	p_ptt = qed_ptt_acquire(p_hwfn);
3025 	if (!p_ptt)
3026 		return -EBUSY;
3027 
3028 	while (bytes_left > 0) {
3029 		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
3030 
3031 		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3032 					DRV_MSG_CODE_NVM_READ_NVRAM,
3033 					addr + offset +
3034 					(bytes_to_copy <<
3035 					 DRV_MB_PARAM_NVM_LEN_OFFSET),
3036 					&resp, &resp_param,
3037 					&read_len,
3038 					(u32 *)(p_buf + offset));
3039 
3040 		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
3041 			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
3042 			break;
3043 		}
3044 
3045 		/* This can be a lengthy process, and it's possible scheduler
3046 		 * isn't preemptable. Sleep a bit to prevent CPU hogging.
3047 		 */
3048 		if (bytes_left % 0x1000 <
3049 		    (bytes_left - read_len) % 0x1000)
3050 			usleep_range(1000, 2000);
3051 
3052 		offset += read_len;
3053 		bytes_left -= read_len;
3054 	}
3055 
3056 	cdev->mcp_nvm_resp = resp;
3057 	qed_ptt_release(p_hwfn, p_ptt);
3058 
3059 	return rc;
3060 }
3061 
3062 int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
3063 {
3064 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3065 	struct qed_ptt *p_ptt;
3066 
3067 	p_ptt = qed_ptt_acquire(p_hwfn);
3068 	if (!p_ptt)
3069 		return -EBUSY;
3070 
3071 	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
3072 	qed_ptt_release(p_hwfn, p_ptt);
3073 
3074 	return 0;
3075 }
3076 
3077 int qed_mcp_nvm_write(struct qed_dev *cdev,
3078 		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
3079 {
3080 	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
3081 	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
3082 	struct qed_ptt *p_ptt;
3083 	int rc = -EINVAL;
3084 
3085 	p_ptt = qed_ptt_acquire(p_hwfn);
3086 	if (!p_ptt)
3087 		return -EBUSY;
3088 
3089 	switch (cmd) {
3090 	case QED_PUT_FILE_BEGIN:
3091 		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
3092 		break;
3093 	case QED_PUT_FILE_DATA:
3094 		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
3095 		break;
3096 	case QED_NVM_WRITE_NVRAM:
3097 		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
3098 		break;
3099 	default:
3100 		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
3101 		rc = -EINVAL;
3102 		goto out;
3103 	}
3104 
3105 	buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
3106 	while (buf_idx < len) {
3107 		if (cmd == QED_PUT_FILE_BEGIN)
3108 			nvm_offset = addr;
3109 		else
3110 			nvm_offset = ((buf_size <<
3111 				       DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
3112 				       buf_idx;
3113 		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
3114 					&resp, &param, buf_size,
3115 					(u32 *)&p_buf[buf_idx]);
3116 		if (rc) {
3117 			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
3118 			resp = FW_MSG_CODE_ERROR;
3119 			break;
3120 		}
3121 
3122 		if (resp != FW_MSG_CODE_OK &&
3123 		    resp != FW_MSG_CODE_NVM_OK &&
3124 		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
3125 			DP_NOTICE(cdev,
3126 				  "nvm write failed, resp = 0x%08x\n", resp);
3127 			rc = -EINVAL;
3128 			break;
3129 		}
3130 
3131 		/* This can be a lengthy process, and it's possible scheduler
3132 		 * isn't pre-emptable. Sleep a bit to prevent CPU hogging.
3133 		 */
3134 		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
3135 			usleep_range(1000, 2000);
3136 
3137 		/* For MBI upgrade, MFW response includes the next buffer offset
3138 		 * to be delivered to MFW.
3139 		 */
3140 		if (param && cmd == QED_PUT_FILE_DATA) {
3141 			buf_idx = QED_MFW_GET_FIELD(param,
3142 					FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
3143 			buf_size = QED_MFW_GET_FIELD(param,
3144 					 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
3145 		} else {
3146 			buf_idx += buf_size;
3147 			buf_size = min_t(u32, (len - buf_idx),
3148 					 MCP_DRV_NVM_BUF_LEN);
3149 		}
3150 	}
3151 
3152 	cdev->mcp_nvm_resp = resp;
3153 out:
3154 	qed_ptt_release(p_hwfn, p_ptt);
3155 
3156 	return rc;
3157 }
3158 
3159 int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3160 			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
3161 {
3162 	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
3163 	u32 resp, param;
3164 	int rc;
3165 
3166 	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
3167 		       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
3168 	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
3169 		       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
3170 
3171 	addr = offset;
3172 	offset = 0;
3173 	bytes_left = len;
3174 	while (bytes_left > 0) {
3175 		bytes_to_copy = min_t(u32, bytes_left,
3176 				      MAX_I2C_TRANSACTION_SIZE);
3177 		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
3178 			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
3179 		nvm_offset |= ((addr + offset) <<
3180 			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
3181 			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
3182 		nvm_offset |= (bytes_to_copy <<
3183 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
3184 			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
3185 		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3186 					DRV_MSG_CODE_TRANSCEIVER_READ,
3187 					nvm_offset, &resp, &param, &buf_size,
3188 					(u32 *)(p_buf + offset));
3189 		if (rc) {
3190 			DP_NOTICE(p_hwfn,
3191 				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3192 				  rc);
3193 			return rc;
3194 		}
3195 
3196 		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
3197 			return -ENODEV;
3198 		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
3199 			return -EINVAL;
3200 
3201 		offset += buf_size;
3202 		bytes_left -= buf_size;
3203 	}
3204 
3205 	return 0;
3206 }
3207 
3208 int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3209 {
3210 	u32 drv_mb_param = 0, rsp, param;
3211 	int rc = 0;
3212 
3213 	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
3214 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3215 
3216 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3217 			 drv_mb_param, &rsp, &param);
3218 
3219 	if (rc)
3220 		return rc;
3221 
3222 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3223 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3224 		rc = -EAGAIN;
3225 
3226 	return rc;
3227 }
3228 
3229 int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3230 {
3231 	u32 drv_mb_param, rsp, param;
3232 	int rc = 0;
3233 
3234 	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
3235 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3236 
3237 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3238 			 drv_mb_param, &rsp, &param);
3239 
3240 	if (rc)
3241 		return rc;
3242 
3243 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3244 	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
3245 		rc = -EAGAIN;
3246 
3247 	return rc;
3248 }
3249 
3250 int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
3251 				    struct qed_ptt *p_ptt,
3252 				    u32 *num_images)
3253 {
3254 	u32 drv_mb_param = 0, rsp;
3255 	int rc = 0;
3256 
3257 	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
3258 			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
3259 
3260 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
3261 			 drv_mb_param, &rsp, num_images);
3262 	if (rc)
3263 		return rc;
3264 
3265 	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
3266 		rc = -EINVAL;
3267 
3268 	return rc;
3269 }
3270 
3271 int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
3272 				   struct qed_ptt *p_ptt,
3273 				   struct bist_nvm_image_att *p_image_att,
3274 				   u32 image_index)
3275 {
3276 	u32 buf_size = 0, param, resp = 0, resp_param = 0;
3277 	int rc;
3278 
3279 	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
3280 		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
3281 	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
3282 
3283 	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
3284 				DRV_MSG_CODE_BIST_TEST, param,
3285 				&resp, &resp_param,
3286 				&buf_size,
3287 				(u32 *)p_image_att);
3288 	if (rc)
3289 		return rc;
3290 
3291 	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
3292 	    (p_image_att->return_code != 1))
3293 		rc = -EINVAL;
3294 
3295 	return rc;
3296 }
3297 
3298 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
3299 {
3300 	struct qed_nvm_image_info nvm_info;
3301 	struct qed_ptt *p_ptt;
3302 	int rc;
3303 	u32 i;
3304 
3305 	if (p_hwfn->nvm_info.valid)
3306 		return 0;
3307 
3308 	p_ptt = qed_ptt_acquire(p_hwfn);
3309 	if (!p_ptt) {
3310 		DP_ERR(p_hwfn, "failed to acquire ptt\n");
3311 		return -EBUSY;
3312 	}
3313 
3314 	/* Acquire from MFW the amount of available images */
3315 	nvm_info.num_images = 0;
3316 	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
3317 					     p_ptt, &nvm_info.num_images);
3318 	if (rc == -EOPNOTSUPP) {
3319 		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3320 		goto out;
3321 	} else if (rc || !nvm_info.num_images) {
3322 		DP_ERR(p_hwfn, "Failed getting number of images\n");
3323 		goto err0;
3324 	}
3325 
3326 	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
3327 					   sizeof(struct bist_nvm_image_att),
3328 					   GFP_KERNEL);
3329 	if (!nvm_info.image_att) {
3330 		rc = -ENOMEM;
3331 		goto err0;
3332 	}
3333 
3334 	/* Iterate over images and get their attributes */
3335 	for (i = 0; i < nvm_info.num_images; i++) {
3336 		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
3337 						    &nvm_info.image_att[i], i);
3338 		if (rc) {
3339 			DP_ERR(p_hwfn,
3340 			       "Failed getting image index %d attributes\n", i);
3341 			goto err1;
3342 		}
3343 
3344 		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
3345 			   nvm_info.image_att[i].len);
3346 	}
3347 out:
3348 	/* Update hwfn's nvm_info */
3349 	if (nvm_info.num_images) {
3350 		p_hwfn->nvm_info.num_images = nvm_info.num_images;
3351 		kfree(p_hwfn->nvm_info.image_att);
3352 		p_hwfn->nvm_info.image_att = nvm_info.image_att;
3353 		p_hwfn->nvm_info.valid = true;
3354 	}
3355 
3356 	qed_ptt_release(p_hwfn, p_ptt);
3357 	return 0;
3358 
3359 err1:
3360 	kfree(nvm_info.image_att);
3361 err0:
3362 	qed_ptt_release(p_hwfn, p_ptt);
3363 	return rc;
3364 }
3365 
3366 void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
3367 {
3368 	kfree(p_hwfn->nvm_info.image_att);
3369 	p_hwfn->nvm_info.image_att = NULL;
3370 	p_hwfn->nvm_info.valid = false;
3371 }
3372 
3373 int
3374 qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
3375 			  enum qed_nvm_images image_id,
3376 			  struct qed_nvm_image_att *p_image_att)
3377 {
3378 	enum nvm_image_type type;
3379 	u32 i;
3380 
3381 	/* Translate image_id into MFW definitions */
3382 	switch (image_id) {
3383 	case QED_NVM_IMAGE_ISCSI_CFG:
3384 		type = NVM_TYPE_ISCSI_CFG;
3385 		break;
3386 	case QED_NVM_IMAGE_FCOE_CFG:
3387 		type = NVM_TYPE_FCOE_CFG;
3388 		break;
3389 	case QED_NVM_IMAGE_MDUMP:
3390 		type = NVM_TYPE_MDUMP;
3391 		break;
3392 	case QED_NVM_IMAGE_NVM_CFG1:
3393 		type = NVM_TYPE_NVM_CFG1;
3394 		break;
3395 	case QED_NVM_IMAGE_DEFAULT_CFG:
3396 		type = NVM_TYPE_DEFAULT_CFG;
3397 		break;
3398 	case QED_NVM_IMAGE_NVM_META:
3399 		type = NVM_TYPE_META;
3400 		break;
3401 	default:
3402 		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
3403 			  image_id);
3404 		return -EINVAL;
3405 	}
3406 
3407 	qed_mcp_nvm_info_populate(p_hwfn);
3408 	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
3409 		if (type == p_hwfn->nvm_info.image_att[i].image_type)
3410 			break;
3411 	if (i == p_hwfn->nvm_info.num_images) {
3412 		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3413 			   "Failed to find nvram image of type %08x\n",
3414 			   image_id);
3415 		return -ENOENT;
3416 	}
3417 
3418 	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
3419 	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
3420 
3421 	return 0;
3422 }
3423 
3424 int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
3425 			  enum qed_nvm_images image_id,
3426 			  u8 *p_buffer, u32 buffer_len)
3427 {
3428 	struct qed_nvm_image_att image_att;
3429 	int rc;
3430 
3431 	memset(p_buffer, 0, buffer_len);
3432 
3433 	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
3434 	if (rc)
3435 		return rc;
3436 
3437 	/* Validate sizes - both the image's and the supplied buffer's */
3438 	if (image_att.length <= 4) {
3439 		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
3440 			   "Image [%d] is too small - only %d bytes\n",
3441 			   image_id, image_att.length);
3442 		return -EINVAL;
3443 	}
3444 
3445 	if (image_att.length > buffer_len) {
3446 		DP_VERBOSE(p_hwfn,
3447 			   QED_MSG_STORAGE,
3448 			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
3449 			   image_id, image_att.length, buffer_len);
3450 		return -ENOMEM;
3451 	}
3452 
3453 	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
3454 				p_buffer, image_att.length);
3455 }
3456 
3457 static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
3458 {
3459 	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
3460 
3461 	switch (res_id) {
3462 	case QED_SB:
3463 		mfw_res_id = RESOURCE_NUM_SB_E;
3464 		break;
3465 	case QED_L2_QUEUE:
3466 		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
3467 		break;
3468 	case QED_VPORT:
3469 		mfw_res_id = RESOURCE_NUM_VPORT_E;
3470 		break;
3471 	case QED_RSS_ENG:
3472 		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
3473 		break;
3474 	case QED_PQ:
3475 		mfw_res_id = RESOURCE_NUM_PQ_E;
3476 		break;
3477 	case QED_RL:
3478 		mfw_res_id = RESOURCE_NUM_RL_E;
3479 		break;
3480 	case QED_MAC:
3481 	case QED_VLAN:
3482 		/* Each VFC resource can accommodate both a MAC and a VLAN */
3483 		mfw_res_id = RESOURCE_VFC_FILTER_E;
3484 		break;
3485 	case QED_ILT:
3486 		mfw_res_id = RESOURCE_ILT_E;
3487 		break;
3488 	case QED_LL2_RAM_QUEUE:
3489 		mfw_res_id = RESOURCE_LL2_QUEUE_E;
3490 		break;
3491 	case QED_LL2_CTX_QUEUE:
3492 		mfw_res_id = RESOURCE_LL2_CQS_E;
3493 		break;
3494 	case QED_RDMA_CNQ_RAM:
3495 	case QED_CMDQS_CQS:
3496 		/* CNQ/CMDQS are the same resource */
3497 		mfw_res_id = RESOURCE_CQS_E;
3498 		break;
3499 	case QED_RDMA_STATS_QUEUE:
3500 		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
3501 		break;
3502 	case QED_BDQ:
3503 		mfw_res_id = RESOURCE_BDQ_E;
3504 		break;
3505 	default:
3506 		break;
3507 	}
3508 
3509 	return mfw_res_id;
3510 }
3511 
3512 #define QED_RESC_ALLOC_VERSION_MAJOR    2
3513 #define QED_RESC_ALLOC_VERSION_MINOR    0
3514 #define QED_RESC_ALLOC_VERSION				     \
3515 	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
3516 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3517 	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
3518 	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3519 
3520 struct qed_resc_alloc_in_params {
3521 	u32 cmd;
3522 	enum qed_resources res_id;
3523 	u32 resc_max_val;
3524 };
3525 
3526 struct qed_resc_alloc_out_params {
3527 	u32 mcp_resp;
3528 	u32 mcp_param;
3529 	u32 resc_num;
3530 	u32 resc_start;
3531 	u32 vf_resc_num;
3532 	u32 vf_resc_start;
3533 	u32 flags;
3534 };
3535 
3536 static int
3537 qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
3538 			    struct qed_ptt *p_ptt,
3539 			    struct qed_resc_alloc_in_params *p_in_params,
3540 			    struct qed_resc_alloc_out_params *p_out_params)
3541 {
3542 	struct qed_mcp_mb_params mb_params;
3543 	struct resource_info mfw_resc_info;
3544 	int rc;
3545 
3546 	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
3547 
3548 	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
3549 	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
3550 		DP_ERR(p_hwfn,
3551 		       "Failed to match resource %d [%s] with the MFW resources\n",
3552 		       p_in_params->res_id,
3553 		       qed_hw_get_resc_name(p_in_params->res_id));
3554 		return -EINVAL;
3555 	}
3556 
3557 	switch (p_in_params->cmd) {
3558 	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
3559 		mfw_resc_info.size = p_in_params->resc_max_val;
3560 		fallthrough;
3561 	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
3562 		break;
3563 	default:
3564 		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
3565 		       p_in_params->cmd);
3566 		return -EINVAL;
3567 	}
3568 
3569 	memset(&mb_params, 0, sizeof(mb_params));
3570 	mb_params.cmd = p_in_params->cmd;
3571 	mb_params.param = QED_RESC_ALLOC_VERSION;
3572 	mb_params.p_data_src = &mfw_resc_info;
3573 	mb_params.data_src_size = sizeof(mfw_resc_info);
3574 	mb_params.p_data_dst = mb_params.p_data_src;
3575 	mb_params.data_dst_size = mb_params.data_src_size;
3576 
3577 	DP_VERBOSE(p_hwfn,
3578 		   QED_MSG_SP,
3579 		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3580 		   p_in_params->cmd,
3581 		   p_in_params->res_id,
3582 		   qed_hw_get_resc_name(p_in_params->res_id),
3583 		   QED_MFW_GET_FIELD(mb_params.param,
3584 				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3585 		   QED_MFW_GET_FIELD(mb_params.param,
3586 				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3587 		   p_in_params->resc_max_val);
3588 
3589 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3590 	if (rc)
3591 		return rc;
3592 
3593 	p_out_params->mcp_resp = mb_params.mcp_resp;
3594 	p_out_params->mcp_param = mb_params.mcp_param;
3595 	p_out_params->resc_num = mfw_resc_info.size;
3596 	p_out_params->resc_start = mfw_resc_info.offset;
3597 	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
3598 	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
3599 	p_out_params->flags = mfw_resc_info.flags;
3600 
3601 	DP_VERBOSE(p_hwfn,
3602 		   QED_MSG_SP,
3603 		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3604 		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3605 				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
3606 		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
3607 				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
3608 		   p_out_params->resc_num,
3609 		   p_out_params->resc_start,
3610 		   p_out_params->vf_resc_num,
3611 		   p_out_params->vf_resc_start, p_out_params->flags);
3612 
3613 	return 0;
3614 }
3615 
3616 int
3617 qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
3618 			 struct qed_ptt *p_ptt,
3619 			 enum qed_resources res_id,
3620 			 u32 resc_max_val, u32 *p_mcp_resp)
3621 {
3622 	struct qed_resc_alloc_out_params out_params;
3623 	struct qed_resc_alloc_in_params in_params;
3624 	int rc;
3625 
3626 	memset(&in_params, 0, sizeof(in_params));
3627 	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
3628 	in_params.res_id = res_id;
3629 	in_params.resc_max_val = resc_max_val;
3630 	memset(&out_params, 0, sizeof(out_params));
3631 	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3632 					 &out_params);
3633 	if (rc)
3634 		return rc;
3635 
3636 	*p_mcp_resp = out_params.mcp_resp;
3637 
3638 	return 0;
3639 }
3640 
3641 int
3642 qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
3643 		      struct qed_ptt *p_ptt,
3644 		      enum qed_resources res_id,
3645 		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
3646 {
3647 	struct qed_resc_alloc_out_params out_params;
3648 	struct qed_resc_alloc_in_params in_params;
3649 	int rc;
3650 
3651 	memset(&in_params, 0, sizeof(in_params));
3652 	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
3653 	in_params.res_id = res_id;
3654 	memset(&out_params, 0, sizeof(out_params));
3655 	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
3656 					 &out_params);
3657 	if (rc)
3658 		return rc;
3659 
3660 	*p_mcp_resp = out_params.mcp_resp;
3661 
3662 	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
3663 		*p_resc_num = out_params.resc_num;
3664 		*p_resc_start = out_params.resc_start;
3665 	}
3666 
3667 	return 0;
3668 }
3669 
3670 int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3671 {
3672 	u32 mcp_resp, mcp_param;
3673 
3674 	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
3675 			   &mcp_resp, &mcp_param);
3676 }
3677 
3678 static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
3679 				struct qed_ptt *p_ptt,
3680 				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
3681 {
3682 	int rc;
3683 
3684 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
3685 			 p_mcp_resp, p_mcp_param);
3686 	if (rc)
3687 		return rc;
3688 
3689 	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3690 		DP_INFO(p_hwfn,
3691 			"The resource command is unsupported by the MFW\n");
3692 		return -EINVAL;
3693 	}
3694 
3695 	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
3696 		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
3697 
3698 		DP_NOTICE(p_hwfn,
3699 			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
3700 			  param, opcode);
3701 		return -EINVAL;
3702 	}
3703 
3704 	return rc;
3705 }
3706 
3707 static int
3708 __qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3709 		    struct qed_ptt *p_ptt,
3710 		    struct qed_resc_lock_params *p_params)
3711 {
3712 	u32 param = 0, mcp_resp, mcp_param;
3713 	u8 opcode;
3714 	int rc;
3715 
3716 	switch (p_params->timeout) {
3717 	case QED_MCP_RESC_LOCK_TO_DEFAULT:
3718 		opcode = RESOURCE_OPCODE_REQ;
3719 		p_params->timeout = 0;
3720 		break;
3721 	case QED_MCP_RESC_LOCK_TO_NONE:
3722 		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
3723 		p_params->timeout = 0;
3724 		break;
3725 	default:
3726 		opcode = RESOURCE_OPCODE_REQ_W_AGING;
3727 		break;
3728 	}
3729 
3730 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3731 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3732 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
3733 
3734 	DP_VERBOSE(p_hwfn,
3735 		   QED_MSG_SP,
3736 		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3737 		   param, p_params->timeout, opcode, p_params->resource);
3738 
3739 	/* Attempt to acquire the resource */
3740 	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3741 	if (rc)
3742 		return rc;
3743 
3744 	/* Analyze the response */
3745 	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
3746 	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3747 
3748 	DP_VERBOSE(p_hwfn,
3749 		   QED_MSG_SP,
3750 		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3751 		   mcp_param, opcode, p_params->owner);
3752 
3753 	switch (opcode) {
3754 	case RESOURCE_OPCODE_GNT:
3755 		p_params->b_granted = true;
3756 		break;
3757 	case RESOURCE_OPCODE_BUSY:
3758 		p_params->b_granted = false;
3759 		break;
3760 	default:
3761 		DP_NOTICE(p_hwfn,
3762 			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3763 			  mcp_param, opcode);
3764 		return -EINVAL;
3765 	}
3766 
3767 	return 0;
3768 }
3769 
3770 int
3771 qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
3772 		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
3773 {
3774 	u32 retry_cnt = 0;
3775 	int rc;
3776 
3777 	do {
3778 		/* No need for an interval before the first iteration */
3779 		if (retry_cnt) {
3780 			if (p_params->sleep_b4_retry) {
3781 				u16 retry_interval_in_ms =
3782 				    DIV_ROUND_UP(p_params->retry_interval,
3783 						 1000);
3784 
3785 				msleep(retry_interval_in_ms);
3786 			} else {
3787 				udelay(p_params->retry_interval);
3788 			}
3789 		}
3790 
3791 		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
3792 		if (rc)
3793 			return rc;
3794 
3795 		if (p_params->b_granted)
3796 			break;
3797 	} while (retry_cnt++ < p_params->retry_num);
3798 
3799 	return 0;
3800 }
3801 
3802 int
3803 qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
3804 		    struct qed_ptt *p_ptt,
3805 		    struct qed_resc_unlock_params *p_params)
3806 {
3807 	u32 param = 0, mcp_resp, mcp_param;
3808 	u8 opcode;
3809 	int rc;
3810 
3811 	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
3812 				   : RESOURCE_OPCODE_RELEASE;
3813 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
3814 	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
3815 
3816 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
3817 		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3818 		   param, opcode, p_params->resource);
3819 
3820 	/* Attempt to release the resource */
3821 	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
3822 	if (rc)
3823 		return rc;
3824 
3825 	/* Analyze the response */
3826 	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
3827 
3828 	DP_VERBOSE(p_hwfn, QED_MSG_SP,
3829 		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3830 		   mcp_param, opcode);
3831 
3832 	switch (opcode) {
3833 	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
3834 		DP_INFO(p_hwfn,
3835 			"Resource unlock request for an already released resource [%d]\n",
3836 			p_params->resource);
3837 		fallthrough;
3838 	case RESOURCE_OPCODE_RELEASED:
3839 		p_params->b_released = true;
3840 		break;
3841 	case RESOURCE_OPCODE_WRONG_OWNER:
3842 		p_params->b_released = false;
3843 		break;
3844 	default:
3845 		DP_NOTICE(p_hwfn,
3846 			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3847 			  mcp_param, opcode);
3848 		return -EINVAL;
3849 	}
3850 
3851 	return 0;
3852 }
3853 
3854 void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
3855 				    struct qed_resc_unlock_params *p_unlock,
3856 				    enum qed_resc_lock
3857 				    resource, bool b_is_permanent)
3858 {
3859 	if (p_lock) {
3860 		memset(p_lock, 0, sizeof(*p_lock));
3861 
3862 		/* Permanent resources don't require aging, and there's no
3863 		 * point in trying to acquire them more than once since it's
3864 		 * unexpected another entity would release them.
3865 		 */
3866 		if (b_is_permanent) {
3867 			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
3868 		} else {
3869 			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
3870 			p_lock->retry_interval =
3871 			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
3872 			p_lock->sleep_b4_retry = true;
3873 		}
3874 
3875 		p_lock->resource = resource;
3876 	}
3877 
3878 	if (p_unlock) {
3879 		memset(p_unlock, 0, sizeof(*p_unlock));
3880 		p_unlock->resource = resource;
3881 	}
3882 }
3883 
3884 bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
3885 {
3886 	return !!(p_hwfn->mcp_info->capabilities &
3887 		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
3888 }
3889 
3890 int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3891 {
3892 	u32 mcp_resp;
3893 	int rc;
3894 
3895 	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
3896 			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
3897 	if (!rc)
3898 		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
3899 			   "MFW supported features: %08x\n",
3900 			   p_hwfn->mcp_info->capabilities);
3901 
3902 	return rc;
3903 }
3904 
3905 int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3906 {
3907 	u32 mcp_resp, mcp_param, features;
3908 
3909 	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
3910 		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
3911 		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
3912 
3913 	if (QED_IS_E5(p_hwfn->cdev))
3914 		features |=
3915 		    DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
3916 
3917 	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
3918 			   features, &mcp_resp, &mcp_param);
3919 }
3920 
3921 int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3922 {
3923 	struct qed_mcp_mb_params mb_params = {0};
3924 	struct qed_dev *cdev = p_hwfn->cdev;
3925 	u8 fir_valid, l2_valid;
3926 	int rc;
3927 
3928 	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
3929 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3930 	if (rc)
3931 		return rc;
3932 
3933 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3934 		DP_INFO(p_hwfn,
3935 			"The get_engine_config command is unsupported by the MFW\n");
3936 		return -EOPNOTSUPP;
3937 	}
3938 
3939 	fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3940 				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
3941 	if (fir_valid)
3942 		cdev->fir_affin =
3943 		    QED_MFW_GET_FIELD(mb_params.mcp_param,
3944 				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
3945 
3946 	l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
3947 				     FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
3948 	if (l2_valid)
3949 		cdev->l2_affin_hint =
3950 		    QED_MFW_GET_FIELD(mb_params.mcp_param,
3951 				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
3952 
3953 	DP_INFO(p_hwfn,
3954 		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
3955 		fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
3956 
3957 	return 0;
3958 }
3959 
3960 int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
3961 {
3962 	struct qed_mcp_mb_params mb_params = {0};
3963 	struct qed_dev *cdev = p_hwfn->cdev;
3964 	int rc;
3965 
3966 	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
3967 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
3968 	if (rc)
3969 		return rc;
3970 
3971 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
3972 		DP_INFO(p_hwfn,
3973 			"The get_ppfid_bitmap command is unsupported by the MFW\n");
3974 		return -EOPNOTSUPP;
3975 	}
3976 
3977 	cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
3978 					       FW_MB_PARAM_PPFID_BITMAP);
3979 
3980 	DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
3981 		   cdev->ppfid_bitmap);
3982 
3983 	return 0;
3984 }
3985 
3986 int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
3987 			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
3988 			u32 *p_len)
3989 {
3990 	u32 mb_param = 0, resp, param;
3991 	int rc;
3992 
3993 	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
3994 	if (flags & QED_NVM_CFG_OPTION_INIT)
3995 		QED_MFW_SET_FIELD(mb_param,
3996 				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
3997 	if (flags & QED_NVM_CFG_OPTION_FREE)
3998 		QED_MFW_SET_FIELD(mb_param,
3999 				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
4000 	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
4001 		QED_MFW_SET_FIELD(mb_param,
4002 				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
4003 		QED_MFW_SET_FIELD(mb_param,
4004 				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
4005 				  entity_id);
4006 	}
4007 
4008 	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
4009 				DRV_MSG_CODE_GET_NVM_CFG_OPTION,
4010 				mb_param, &resp, &param, p_len, (u32 *)p_buf);
4011 
4012 	return rc;
4013 }
4014 
4015 int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
4016 			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
4017 			u32 len)
4018 {
4019 	u32 mb_param = 0, resp, param;
4020 
4021 	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
4022 	if (flags & QED_NVM_CFG_OPTION_ALL)
4023 		QED_MFW_SET_FIELD(mb_param,
4024 				  DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
4025 	if (flags & QED_NVM_CFG_OPTION_INIT)
4026 		QED_MFW_SET_FIELD(mb_param,
4027 				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
4028 	if (flags & QED_NVM_CFG_OPTION_COMMIT)
4029 		QED_MFW_SET_FIELD(mb_param,
4030 				  DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
4031 	if (flags & QED_NVM_CFG_OPTION_FREE)
4032 		QED_MFW_SET_FIELD(mb_param,
4033 				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
4034 	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
4035 		QED_MFW_SET_FIELD(mb_param,
4036 				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
4037 		QED_MFW_SET_FIELD(mb_param,
4038 				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
4039 				  entity_id);
4040 	}
4041 
4042 	return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
4043 				  DRV_MSG_CODE_SET_NVM_CFG_OPTION,
4044 				  mb_param, &resp, &param, len, (u32 *)p_buf);
4045 }
4046 
4047 #define QED_MCP_DBG_DATA_MAX_SIZE               MCP_DRV_NVM_BUF_LEN
4048 #define QED_MCP_DBG_DATA_MAX_HEADER_SIZE        sizeof(u32)
4049 #define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
4050 	(QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
4051 
4052 static int
4053 __qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
4054 			  struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
4055 {
4056 	struct qed_mcp_mb_params mb_params;
4057 	int rc;
4058 
4059 	if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
4060 		DP_ERR(p_hwfn,
4061 		       "Debug data size is %d while it should not exceed %d\n",
4062 		       size, QED_MCP_DBG_DATA_MAX_SIZE);
4063 		return -EINVAL;
4064 	}
4065 
4066 	memset(&mb_params, 0, sizeof(mb_params));
4067 	mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
4068 	SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
4069 	mb_params.p_data_src = p_buf;
4070 	mb_params.data_src_size = size;
4071 	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
4072 	if (rc)
4073 		return rc;
4074 
4075 	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
4076 		DP_INFO(p_hwfn,
4077 			"The DEBUG_DATA_SEND command is unsupported by the MFW\n");
4078 		return -EOPNOTSUPP;
4079 	} else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
4080 		DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
4081 		return -EBUSY;
4082 	} else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
4083 		DP_NOTICE(p_hwfn,
4084 			  "Failed to send debug data to the MFW [resp 0x%08x]\n",
4085 			  mb_params.mcp_resp);
4086 		return -EINVAL;
4087 	}
4088 
4089 	return 0;
4090 }
4091 
4092 enum qed_mcp_dbg_data_type {
4093 	QED_MCP_DBG_DATA_TYPE_RAW,
4094 };
4095 
4096 /* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
4097 #define QED_MCP_DBG_DATA_HDR_SN_OFFSET  0
4098 #define QED_MCP_DBG_DATA_HDR_SN_MASK            0x00000fff
4099 #define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET        12
4100 #define QED_MCP_DBG_DATA_HDR_TYPE_MASK  0x000ff000
4101 #define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET       20
4102 #define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
4103 #define QED_MCP_DBG_DATA_HDR_PF_OFFSET  28
4104 #define QED_MCP_DBG_DATA_HDR_PF_MASK            0xf0000000
4105 
4106 #define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST        0x1
4107 #define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
4108 
4109 static int
4110 qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
4111 			struct qed_ptt *p_ptt,
4112 			enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
4113 {
4114 	u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
4115 	u32 tmp_size = size, *p_header, *p_payload;
4116 	u8 flags = 0;
4117 	u16 seq;
4118 	int rc;
4119 
4120 	p_header = (u32 *)raw_data;
4121 	p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
4122 
4123 	seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
4124 
4125 	/* First chunk is marked as 'first' */
4126 	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4127 
4128 	*p_header = 0;
4129 	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
4130 	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
4131 	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4132 	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
4133 
4134 	while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
4135 		memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
4136 		rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4137 					       QED_MCP_DBG_DATA_MAX_SIZE);
4138 		if (rc)
4139 			return rc;
4140 
4141 		/* Clear the 'first' marking after sending the first chunk */
4142 		if (p_tmp_buf == p_buf) {
4143 			flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
4144 			SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
4145 				      flags);
4146 		}
4147 
4148 		p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4149 		tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
4150 	}
4151 
4152 	/* Last chunk is marked as 'last' */
4153 	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
4154 	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
4155 	memcpy(p_payload, p_tmp_buf, tmp_size);
4156 
4157 	/* Casting the left size to u8 is ok since at this point it is <= 32 */
4158 	return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
4159 					 (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
4160 					 tmp_size));
4161 }
4162 
4163 int
4164 qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
4165 			    struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
4166 {
4167 	return qed_mcp_send_debug_data(p_hwfn, p_ptt,
4168 				       QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
4169 }
4170