/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define CHIP_MCP_RESP_ITER_US 10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)    \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	return p_hwfn->mcp_info && p_hwfn->mcp_info->public_base;
}

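/* Cache the address of this function's port section within the MFW public
 * shmem, for later port-level reads (link status, media type, etc.).
 */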
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

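	/* The first dword of the mailbox holds its length; the message
	 * dwords follow it, hence the sizeof(u32) offset below.
	 */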
	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is big-endian; convert it to CPU byte order */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);

	return 0;
}

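/* Read the shmem layout published by the MCP and cache the per-PF driver
 * and MFW mailbox addresses, along with the current sequence numbers.
 * A public_base of zero indicates that the MCP is not initialized.
 */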
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x, mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since a zero public_base
		 * already indicates that the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

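/* Issue an MCP_RESET mailbox command. Completion is detected by a change
 * in the MCP generic POR (history) register rather than by a response.
 */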
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = CHIP_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new unhandled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}

/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

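/* Send a mailbox command and poll for the MFW response: wait until no
 * other command occupies the mailbox, write the command with a fresh
 * sequence number while tracking it on cmd_list, and then poll until the
 * matching response arrives or the retries expire. cmd_lock is dropped
 * between polling iterations so other contexts can complete their own
 * pending commands.
 */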
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 delay)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 cnt = 0;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is unoccupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
		udelay(delay);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		udelay(delay);
		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * delay) / 1000, (cnt * delay) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 delay = CHIP_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      delay);
}

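/* Simple command/response wrapper. Sample usage, mirroring the
 * BW_UPDATE_ACK sent from qed_mcp_update_bw() below:
 *
 *	u32 resp = 0, param = 0;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0,
 *			 &resp, &param);
 */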
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}

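/* Force-load policy: unless explicitly overridden, only allow forcing out
 * a preboot driver by an OS driver, or an OS driver by a kdump driver.
 */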
static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

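/* Bit assignments for the kernel-config bitmap that is reported to the
 * MFW as drv_ver_1 in the load request (see qed_mcp_load_req()).
 */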
#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)

static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}

struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}

static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				enum qed_drv_role drv_role,
				u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}

enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};

static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

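/* Negotiate the driver load with the MFW: start with the default HSI
 * version, resend with HSI version 1 if the MFW refuses the new format,
 * and resend with a force command if overriding an existing load is
 * permitted by the force-load policy.
 */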
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = QED_VERSION;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn, "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}

int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 wol_param, mcp_resp, mcp_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		/* Fallthrough */
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_UNLOAD_REQ, wol_param,
			   &mcp_resp, &mcp_param);
}

int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

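/* Read the per-path bitmap of FLR-ed VFs from shmem and schedule the IOV
 * workqueue to process any VFs that were marked.
 */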
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			  offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
	/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* If the MCP fails to respond, we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 *  - On reset, there's no guarantee MFW would trigger
	 *    an attention.
	 *  - On initialization, older MFWs might not indicate link change
	 *    during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}

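/* The MFW requests up-to-date protocol statistics via an async message;
 * gather them from the driver and send them back with a GET_STATS command.
 */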
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
					FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
					FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}

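/* Handle a BW_UPDATE notification: re-read the min/max bandwidth
 * configuration from shmem, apply it, and acknowledge the MFW.
 */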
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* The MFW expects the answer in BE, so force the write in
		 * that format
		 */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (IS_VF(cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* No legacy MFW ever published iWARP support, so at this point
	 * this is either plain L2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32) *p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for the command; returned %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		DP_NOTICE(p_hwfn,
			  "Current day drivers don't support RoCE & iWARP. Default to RoCE-only\n");
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32) *p_proto, resp, param);
	return 0;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
	/* Fallthrough */
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		info->pause_on_host, info->protocol,
		info->bandwidth_min, info->bandwidth_max,
		info->mac[0], info->mac[1], info->mac[2],
		info->mac[3], info->mac[4], info->mac[5],
		info->wwn_port, info->wwn_node,
		info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}
1716 
1717 struct qed_mcp_link_params
1718 *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
1719 {
1720 	if (!p_hwfn || !p_hwfn->mcp_info)
1721 		return NULL;
1722 	return &p_hwfn->mcp_info->link_input;
1723 }
1724 
1725 struct qed_mcp_link_state
1726 *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
1727 {
1728 	if (!p_hwfn || !p_hwfn->mcp_info)
1729 		return NULL;
1730 	return &p_hwfn->mcp_info->link_output;
1731 }
1732 
1733 struct qed_mcp_link_capabilities
1734 *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
1735 {
1736 	if (!p_hwfn || !p_hwfn->mcp_info)
1737 		return NULL;
1738 	return &p_hwfn->mcp_info->link_capabilities;
1739 }
1740 
1741 int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1742 {
1743 	u32 resp = 0, param = 0;
1744 	int rc;
1745 
1746 	rc = qed_mcp_cmd(p_hwfn, p_ptt,
1747 			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
1748 
	/* Wait for the requested drain period (plus some margin) to
	 * complete before returning.
	 */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

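	/* CFG4 encodes the flash size as a power-of-two number of Mbits
	 * (1 Mbit = 1 << MCP_BYTES_PER_MBIT_SHIFT bytes); e.g. an encoded
	 * value of 6 yields 1 << (6 + 17) bytes = 8 MB.
	 */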
	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only the leader hwfn can configure MSI-X; CMT must be taken
	 * into account.
	 */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
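
	/* The MFW configures MSI-X for the whole device, so scale the
	 * per-hwfn request; e.g. with two hwfns, a request for 4 SBs
	 * becomes 8.
	 */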
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-X interrupts for VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
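
/* Illustrative only -- a minimal sketch of how a caller might report its
 * version; the 8-bits-per-field packing below is an assumption made for
 * this example rather than something this file mandates:
 *
 *	struct qed_mcp_drv_version drv_ver = { 0 };
 *
 *	drv_ver.version = (1 << 24) | (2 << 16) | (3 << 8) | 4;
 *	strscpy(drv_ver.name, "example-driver", sizeof(drv_ver.name));
 *	rc = qed_mcp_send_drv_version(p_hwfn, p_ptt, &drv_ver);
 */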

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 value, cpu_mode;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
}
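
/* Illustrative only -- qed_mcp_halt() and qed_mcp_resume() appear designed
 * to be used as a pair around work that must not race with the management
 * FW:
 *
 *	if (!qed_mcp_halt(p_hwfn, p_ptt)) {
 *		... critical work ...
 *		rc = qed_mcp_resume(p_hwfn, p_ptt);
 *	}
 */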

int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send MTU value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* The MCP is big-endian, and on LE platforms PCI swaps SHMEM
	 * accesses at 32-bit granularity. The MAC therefore has to be
	 * packed into native-order 32-bit words [and not copied in byte
	 * order]; otherwise the MFW would read it incorrectly after the
	 * swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
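	/* e.g. MAC 00:11:22:33:44:55 is sent as
	 * mfw_mac[0] = 0x00112233, mfw_mac[1] = 0x44550000.
	 */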

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send MAC address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}

int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid WoL state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send WoL mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}
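
/* Illustrative only -- e.g. an ethtool identify-device handler might blink
 * the port LED and then hand control back to the firmware:
 *
 *	qed_mcp_set_led(p_hwfn, p_ptt, QED_LED_MODE_ON);
 *	msleep(500);
 *	qed_mcp_set_led(p_hwfn, p_ptt, QED_LED_MODE_RESTORE);
 */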

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_SHIFT),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and the scheduler might not
		 * be preemptible. Sleep once per 4 KB boundary crossed to
		 * prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
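
/* Illustrative only -- reading the first 4 KB of NVM into a caller-owned
 * buffer; the offset and length are arbitrary example values:
 *
 *	u8 *buf = kzalloc(0x1000, GFP_KERNEL);
 *
 *	if (buf && !qed_mcp_nvm_read(cdev, 0, buf, 0x1000))
 *		... parse buf ...
 *	kfree(buf);
 */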

int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);

	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_nvm_test_get_num_images(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt,
					 u32 *num_images)
{
	u32 drv_mb_param, rsp;
	int rc;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)
		rc = -EINVAL;

	return rc;
}

int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					struct bist_nvm_image_att *p_image_att,
					u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}
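
/* Illustrative only -- walking the NVM images reported by the BIST:
 *
 *	struct bist_nvm_image_att image_att;
 *	u32 i, num_images = 0;
 *
 *	if (!qed_mcp_bist_nvm_test_get_num_images(p_hwfn, p_ptt, &num_images))
 *		for (i = 0; i < num_images; i++)
 *			if (!qed_mcp_bist_nvm_test_get_image_att(p_hwfn, p_ptt,
 *								 &image_att, i))
 *				... inspect image_att ...
 */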

static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case QED_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define QED_RESC_ALLOC_VERSION_MAJOR    2
#define QED_RESC_ALLOC_VERSION_MINOR    0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct qed_resc_alloc_in_params {
	u32 cmd;
	enum qed_resources res_id;
	u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_resc_alloc_in_params *p_in_params,
			    struct qed_resc_alloc_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	int rc;

	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       qed_hw_get_resc_name(p_in_params->res_id));
		return -EINVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		/* Fallthrough */
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = QED_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
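	/* The same buffer carries both directions: the MFW overwrites the
	 * request with the resource info in its response.
	 */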
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd,
		   p_in_params->res_id,
		   qed_hw_get_resc_name(p_in_params->res_id),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num,
		   p_out_params->resc_start,
		   p_out_params->vf_resc_num,
		   p_out_params->vf_resc_start, p_out_params->flags);

	return 0;
}

int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return 0;
}
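
/* Illustrative only -- querying the PF's L2 queue allocation; error
 * handling is elided:
 *
 *	u32 resp = 0, num = 0, start = 0;
 *
 *	if (!qed_mcp_get_resc_info(p_hwfn, p_ptt, QED_L2_QUEUE,
 *				   &resp, &num, &start) &&
 *	    resp == FW_MSG_CODE_RESOURCE_ALLOC_OK)
 *		... the PF owns queues [start, start + num) ...
 */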

int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return -EINVAL;
	}

	return rc;
}

int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	switch (p_params->timeout) {
	case QED_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case QED_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	int rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
				    DIV_ROUND_UP(p_params->retry_interval,
						 1000);

				msleep(retry_interval_in_ms);
			} else {
				udelay(p_params->retry_interval);
			}
		}

		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return 0;
}

int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fallthrough */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since no
		 * other entity is expected to release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}
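
/* Illustrative only -- the typical lock/unlock pattern for serializing
 * access to a shared resource via the MFW; QED_RESC_LOCK_DBG_DUMP is used
 * merely as an example resource:
 *
 *	struct qed_resc_lock_params lock;
 *	struct qed_resc_unlock_params unlock;
 *
 *	qed_mcp_resc_lock_default_init(&lock, &unlock,
 *				       QED_RESC_LOCK_DBG_DUMP, false);
 *	if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock) && lock.b_granted) {
 *		... access the shared resource ...
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
 *	}
 */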