1 /*
2  * Copyright (C) 2005 - 2015 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 
22 static char *be_port_misconfig_evt_desc[] = {
23 	"A valid SFP module detected",
24 	"Optics faulted/ incorrectly installed/ not installed.",
25 	"Optics of two types installed.",
26 	"Incompatible optics.",
27 	"Unknown port SFP status"
28 };
29 
30 static char *be_port_misconfig_remedy_desc[] = {
31 	"",
32 	"Reseat optics. If issue not resolved, replace",
33 	"Remove one optic or install matching pair of optics",
34 	"Replace with compatible optics for card to function",
35 	""
36 };
37 
38 static struct be_cmd_priv_map cmd_priv_map[] = {
39 	{
40 		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
41 		CMD_SUBSYSTEM_ETH,
42 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
43 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
44 	},
45 	{
46 		OPCODE_COMMON_GET_FLOW_CONTROL,
47 		CMD_SUBSYSTEM_COMMON,
48 		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
49 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
50 	},
51 	{
52 		OPCODE_COMMON_SET_FLOW_CONTROL,
53 		CMD_SUBSYSTEM_COMMON,
54 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
55 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
56 	},
57 	{
58 		OPCODE_ETH_GET_PPORT_STATS,
59 		CMD_SUBSYSTEM_ETH,
60 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
61 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
62 	},
63 	{
64 		OPCODE_COMMON_GET_PHY_DETAILS,
65 		CMD_SUBSYSTEM_COMMON,
66 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
67 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
68 	}
69 };
70 
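/* Returns false only if the given cmd (identified by opcode/subsystem) is
 * listed in cmd_priv_map[] and this function has none of the privileges
 * required to issue it; cmds not in the map are always allowed
 */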
71 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
72 {
73 	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
75 	u32 cmd_privileges = adapter->cmd_privileges;
76 
77 	for (i = 0; i < num_entries; i++)
78 		if (opcode == cmd_priv_map[i].opcode &&
79 		    subsystem == cmd_priv_map[i].subsystem)
80 			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
81 				return false;
82 
83 	return true;
84 }
85 
86 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
87 {
88 	return wrb->payload.embedded_payload;
89 }
90 
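/* Ring the MCC doorbell to tell the FW that one new WRB has been posted on
 * the MCC queue; the wmb() makes the WRB writes visible to the device before
 * the doorbell write
 */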
91 static void be_mcc_notify(struct be_adapter *adapter)
92 {
93 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
94 	u32 val = 0;
95 
96 	if (be_check_error(adapter, BE_ERROR_ANY))
97 		return;
98 
99 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
100 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
101 
102 	wmb();
103 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
104 }
105 
/* To check if the valid bit is set, check the entire word as we don't know
 * the endianness of the data (an old entry is in host endian while a new
 * entry is little endian)
 */
109 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
110 {
111 	u32 flags;
112 
113 	if (compl->flags != 0) {
114 		flags = le32_to_cpu(compl->flags);
115 		if (flags & CQE_FLAGS_VALID_MASK) {
116 			compl->flags = flags;
117 			return true;
118 		}
119 	}
120 	return false;
121 }
122 
123 /* Need to reset the entire word that houses the valid bit */
124 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
125 {
126 	compl->flags = 0;
127 }
128 
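/* The WRB tags carry the virtual address of the request buffer (split across
 * tag0/tag1 by fill_wrb_tags()); rebuild that pointer from the tags copied
 * back in the completion. The double 16-bit shift (instead of << 32) avoids
 * undefined behaviour when unsigned long is only 32 bits wide.
 */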
129 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
130 {
131 	unsigned long addr;
132 
133 	addr = tag1;
134 	addr = ((addr << 16) << 16) | tag0;
135 	return (void *)addr;
136 }
137 
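/* Returns true for failures that are expected in normal operation (e.g.
 * unsupported or illegal requests, resource limits, and certain flash-update
 * statuses) and hence need not be logged as errors
 */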
138 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
139 {
140 	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
141 	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
142 	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
143 	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
144 	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
145 	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
146 	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
147 		return true;
148 	else
149 		return false;
150 }
151 
/* Single place for processing completions of async MCC cmds whose callers
 * are not in a busy loop (i.e. have not issued be_mcc_notify_wait())
 */
155 static void be_async_cmd_process(struct be_adapter *adapter,
156 				 struct be_mcc_compl *compl,
157 				 struct be_cmd_resp_hdr *resp_hdr)
158 {
159 	enum mcc_base_status base_status = base_status(compl->status);
160 	u8 opcode = 0, subsystem = 0;
161 
162 	if (resp_hdr) {
163 		opcode = resp_hdr->opcode;
164 		subsystem = resp_hdr->subsystem;
165 	}
166 
167 	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
168 	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
169 		complete(&adapter->et_cmd_compl);
170 		return;
171 	}
172 
173 	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
174 	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
175 	    subsystem == CMD_SUBSYSTEM_COMMON) {
176 		adapter->flash_status = compl->status;
177 		complete(&adapter->et_cmd_compl);
178 		return;
179 	}
180 
181 	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
182 	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
183 	    subsystem == CMD_SUBSYSTEM_ETH &&
184 	    base_status == MCC_STATUS_SUCCESS) {
185 		be_parse_stats(adapter);
186 		adapter->stats_cmd_sent = false;
187 		return;
188 	}
189 
190 	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
191 	    subsystem == CMD_SUBSYSTEM_COMMON) {
192 		if (base_status == MCC_STATUS_SUCCESS) {
193 			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
194 							(void *)resp_hdr;
195 			adapter->hwmon_info.be_on_die_temp =
196 						resp->on_die_temperature;
197 		} else {
198 			adapter->be_get_temp_freq = 0;
199 			adapter->hwmon_info.be_on_die_temp =
200 						BE_INVALID_DIE_TEMP;
201 		}
202 		return;
203 	}
204 }
205 
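/* Process a single MCC completion: convert the status dword to host endian,
 * hand the completion to the async-cmd handler, and log unexpected failures;
 * returns the raw completion status
 */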
206 static int be_mcc_compl_process(struct be_adapter *adapter,
207 				struct be_mcc_compl *compl)
208 {
209 	enum mcc_base_status base_status;
210 	enum mcc_addl_status addl_status;
211 	struct be_cmd_resp_hdr *resp_hdr;
212 	u8 opcode = 0, subsystem = 0;
213 
	/* Just swap the status to host endian; the mcc tag is opaquely copied
	 * from the mcc_wrb
	 */
216 	be_dws_le_to_cpu(compl, 4);
217 
218 	base_status = base_status(compl->status);
219 	addl_status = addl_status(compl->status);
220 
221 	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
222 	if (resp_hdr) {
223 		opcode = resp_hdr->opcode;
224 		subsystem = resp_hdr->subsystem;
225 	}
226 
227 	be_async_cmd_process(adapter, compl, resp_hdr);
228 
229 	if (base_status != MCC_STATUS_SUCCESS &&
230 	    !be_skip_err_log(opcode, base_status, addl_status)) {
231 		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
232 			dev_warn(&adapter->pdev->dev,
233 				 "VF is not privileged to issue opcode %d-%d\n",
234 				 opcode, subsystem);
235 		} else {
236 			dev_err(&adapter->pdev->dev,
237 				"opcode %d-%d failed:status %d-%d\n",
238 				opcode, subsystem, base_status, addl_status);
239 		}
240 	}
241 	return compl->status;
242 }
243 
244 /* Link state evt is a string of bytes; no need for endian swapping */
245 static void be_async_link_state_process(struct be_adapter *adapter,
246 					struct be_mcc_compl *compl)
247 {
248 	struct be_async_event_link_state *evt =
249 			(struct be_async_event_link_state *)compl;
250 
251 	/* When link status changes, link speed must be re-queried from FW */
252 	adapter->phy.link_speed = -1;
253 
254 	/* On BEx the FW does not send a separate link status
255 	 * notification for physical and logical link.
256 	 * On other chips just process the logical link
257 	 * status notification
258 	 */
259 	if (!BEx_chip(adapter) &&
260 	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
261 		return;
262 
263 	/* For the initial link status do not rely on the ASYNC event as
264 	 * it may not be received in some cases.
265 	 */
266 	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
267 		be_link_status_update(adapter,
268 				      evt->port_link_status & LINK_STATUS_MASK);
269 }
270 
271 static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
272 						  struct be_mcc_compl *compl)
273 {
274 	struct be_async_event_misconfig_port *evt =
275 			(struct be_async_event_misconfig_port *)compl;
276 	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
277 	struct device *dev = &adapter->pdev->dev;
278 	u8 port_misconfig_evt;
279 
280 	port_misconfig_evt =
281 		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
282 
	/* Guard against an out-of-range status byte from the FW before
	 * indexing the description tables; unknown values map to the last
	 * ("Unknown") entry
	 */
	if (port_misconfig_evt >= ARRAY_SIZE(be_port_misconfig_evt_desc))
		port_misconfig_evt =
			ARRAY_SIZE(be_port_misconfig_evt_desc) - 1;

	/* Log a message that allows a user to determine whether the SFPs
	 * have an issue
	 */
	dev_info(dev, "Port %c: %s %s\n", adapter->port_name,
		 be_port_misconfig_evt_desc[port_misconfig_evt],
		 be_port_misconfig_remedy_desc[port_misconfig_evt]);
289 
290 	if (port_misconfig_evt == INCOMPATIBLE_SFP)
291 		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
292 }
293 
294 /* Grp5 CoS Priority evt */
295 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
296 					       struct be_mcc_compl *compl)
297 {
298 	struct be_async_event_grp5_cos_priority *evt =
299 			(struct be_async_event_grp5_cos_priority *)compl;
300 
301 	if (evt->valid) {
302 		adapter->vlan_prio_bmap = evt->available_priority_bmap;
303 		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio |=
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
306 	}
307 }
308 
309 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
310 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
311 					    struct be_mcc_compl *compl)
312 {
313 	struct be_async_event_grp5_qos_link_speed *evt =
314 			(struct be_async_event_grp5_qos_link_speed *)compl;
315 
316 	if (adapter->phy.link_speed >= 0 &&
317 	    evt->physical_port == adapter->port_num)
318 		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
319 }
320 
321 /*Grp5 PVID evt*/
322 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
323 					     struct be_mcc_compl *compl)
324 {
325 	struct be_async_event_grp5_pvid_state *evt =
326 			(struct be_async_event_grp5_pvid_state *)compl;
327 
328 	if (evt->enabled) {
329 		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
330 		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
331 	} else {
332 		adapter->pvid = 0;
333 	}
334 }
335 
336 #define MGMT_ENABLE_MASK	0x4
337 static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
338 					     struct be_mcc_compl *compl)
339 {
340 	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
341 	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
342 
343 	if (evt_dw1 & MGMT_ENABLE_MASK) {
344 		adapter->flags |= BE_FLAGS_OS2BMC;
345 		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
346 	} else {
347 		adapter->flags &= ~BE_FLAGS_OS2BMC;
348 	}
349 }
350 
351 static void be_async_grp5_evt_process(struct be_adapter *adapter,
352 				      struct be_mcc_compl *compl)
353 {
354 	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
355 				ASYNC_EVENT_TYPE_MASK;
356 
357 	switch (event_type) {
358 	case ASYNC_EVENT_COS_PRIORITY:
359 		be_async_grp5_cos_priority_process(adapter, compl);
360 		break;
361 	case ASYNC_EVENT_QOS_SPEED:
362 		be_async_grp5_qos_speed_process(adapter, compl);
363 		break;
364 	case ASYNC_EVENT_PVID_STATE:
365 		be_async_grp5_pvid_state_process(adapter, compl);
366 		break;
367 	/* Async event to disable/enable os2bmc and/or mac-learning */
368 	case ASYNC_EVENT_FW_CONTROL:
369 		be_async_grp5_fw_control_process(adapter, compl);
370 		break;
371 	default:
372 		break;
373 	}
374 }
375 
376 static void be_async_dbg_evt_process(struct be_adapter *adapter,
377 				     struct be_mcc_compl *cmp)
378 {
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
396 }
397 
398 static void be_async_sliport_evt_process(struct be_adapter *adapter,
399 					 struct be_mcc_compl *cmp)
400 {
401 	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
402 			ASYNC_EVENT_TYPE_MASK;
403 
404 	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
405 		be_async_port_misconfig_event_process(adapter, cmp);
406 }
407 
408 static inline bool is_link_state_evt(u32 flags)
409 {
410 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
411 			ASYNC_EVENT_CODE_LINK_STATE;
412 }
413 
414 static inline bool is_grp5_evt(u32 flags)
415 {
416 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
417 			ASYNC_EVENT_CODE_GRP_5;
418 }
419 
420 static inline bool is_dbg_evt(u32 flags)
421 {
422 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
423 			ASYNC_EVENT_CODE_QNQ;
424 }
425 
426 static inline bool is_sliport_evt(u32 flags)
427 {
428 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
429 		ASYNC_EVENT_CODE_SLIPORT;
430 }
431 
432 static void be_mcc_event_process(struct be_adapter *adapter,
433 				 struct be_mcc_compl *compl)
434 {
435 	if (is_link_state_evt(compl->flags))
436 		be_async_link_state_process(adapter, compl);
437 	else if (is_grp5_evt(compl->flags))
438 		be_async_grp5_evt_process(adapter, compl);
439 	else if (is_dbg_evt(compl->flags))
440 		be_async_dbg_evt_process(adapter, compl);
441 	else if (is_sliport_evt(compl->flags))
442 		be_async_sliport_evt_process(adapter, compl);
443 }
444 
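/* Returns the completion at the MCC CQ tail if its valid bit is set (and
 * advances the tail), else NULL
 */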
445 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
446 {
447 	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
448 	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
449 
450 	if (be_mcc_compl_is_new(compl)) {
451 		queue_tail_inc(mcc_cq);
452 		return compl;
453 	}
454 	return NULL;
455 }
456 
457 void be_async_mcc_enable(struct be_adapter *adapter)
458 {
459 	spin_lock_bh(&adapter->mcc_cq_lock);
460 
461 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
462 	adapter->mcc_obj.rearm_cq = true;
463 
464 	spin_unlock_bh(&adapter->mcc_cq_lock);
465 }
466 
467 void be_async_mcc_disable(struct be_adapter *adapter)
468 {
469 	spin_lock_bh(&adapter->mcc_cq_lock);
470 
471 	adapter->mcc_obj.rearm_cq = false;
472 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
473 
474 	spin_unlock_bh(&adapter->mcc_cq_lock);
475 }
476 
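/* Drain the MCC CQ: dispatch async events, process cmd completions, and
 * notify the CQ (re-arming it if enabled); returns the status of the last
 * cmd completion processed
 */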
477 int be_process_mcc(struct be_adapter *adapter)
478 {
479 	struct be_mcc_compl *compl;
480 	int num = 0, status = 0;
481 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
482 
483 	spin_lock(&adapter->mcc_cq_lock);
484 
485 	while ((compl = be_mcc_compl_get(adapter))) {
486 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
487 			be_mcc_event_process(adapter, compl);
488 		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
489 			status = be_mcc_compl_process(adapter, compl);
490 			atomic_dec(&mcc_obj->q.used);
491 		}
492 		be_mcc_compl_use(compl);
493 		num++;
494 	}
495 
496 	if (num)
497 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
498 
499 	spin_unlock(&adapter->mcc_cq_lock);
500 	return status;
501 }
502 
503 /* Wait till no more pending mcc requests are present */
504 static int be_mcc_wait_compl(struct be_adapter *adapter)
505 {
506 #define mcc_timeout		120000 /* 12s timeout */
507 	int i, status = 0;
508 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
509 
510 	for (i = 0; i < mcc_timeout; i++) {
511 		if (be_check_error(adapter, BE_ERROR_ANY))
512 			return -EIO;
513 
514 		local_bh_disable();
515 		status = be_process_mcc(adapter);
516 		local_bh_enable();
517 
518 		if (atomic_read(&mcc_obj->q.used) == 0)
519 			break;
520 		udelay(100);
521 	}
522 	if (i == mcc_timeout) {
523 		dev_err(&adapter->pdev->dev, "FW not responding\n");
524 		be_set_error(adapter, BE_ERROR_FW);
525 		return -EIO;
526 	}
527 	return status;
528 }
529 
530 /* Notify MCC requests and wait for completion */
531 static int be_mcc_notify_wait(struct be_adapter *adapter)
532 {
533 	int status;
534 	struct be_mcc_wrb *wrb;
535 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
536 	u16 index = mcc_obj->q.head;
537 	struct be_cmd_resp_hdr *resp;
538 
539 	index_dec(&index, mcc_obj->q.len);
540 	wrb = queue_index_node(&mcc_obj->q, index);
541 
542 	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
543 
544 	be_mcc_notify(adapter);
545 
546 	status = be_mcc_wait_compl(adapter);
547 	if (status == -EIO)
548 		goto out;
549 
550 	status = (resp->base_status |
551 		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
552 		   CQE_ADDL_STATUS_SHIFT));
553 out:
554 	return status;
555 }
556 
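/* Poll the mailbox doorbell until the FW sets the ready bit; gives up after
 * ~4 seconds, on an adapter error, or if the register reads as all ones
 * (device removed)
 */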
557 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
558 {
559 	int msecs = 0;
560 	u32 ready;
561 
562 	do {
563 		if (be_check_error(adapter, BE_ERROR_ANY))
564 			return -EIO;
565 
566 		ready = ioread32(db);
567 		if (ready == 0xffffffff)
568 			return -1;
569 
570 		ready &= MPU_MAILBOX_DB_RDY_MASK;
571 		if (ready)
572 			break;
573 
574 		if (msecs > 4000) {
575 			dev_err(&adapter->pdev->dev, "FW not responding\n");
576 			be_set_error(adapter, BE_ERROR_FW);
577 			be_detect_error(adapter);
578 			return -1;
579 		}
580 
581 		msleep(1);
582 		msecs++;
583 	} while (true);
584 
585 	return 0;
586 }
587 
588 /*
589  * Insert the mailbox address into the doorbell in two steps
590  * Polls on the mbox doorbell till a command completion (or a timeout) occurs
591  */
592 static int be_mbox_notify_wait(struct be_adapter *adapter)
593 {
594 	int status;
595 	u32 val = 0;
596 	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
597 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
598 	struct be_mcc_mailbox *mbox = mbox_mem->va;
599 	struct be_mcc_compl *compl = &mbox->compl;
600 
601 	/* wait for ready to be set */
602 	status = be_mbox_db_ready_wait(adapter, db);
603 	if (status != 0)
604 		return status;
605 
606 	val |= MPU_MAILBOX_DB_HI_MASK;
607 	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
608 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
609 	iowrite32(val, db);
610 
611 	/* wait for ready to be set */
612 	status = be_mbox_db_ready_wait(adapter, db);
613 	if (status != 0)
614 		return status;
615 
616 	val = 0;
617 	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
618 	val |= (u32)(mbox_mem->dma >> 4) << 2;
619 	iowrite32(val, db);
620 
621 	status = be_mbox_db_ready_wait(adapter, db);
622 	if (status != 0)
623 		return status;
624 
625 	/* A cq entry has been made now */
626 	if (be_mcc_compl_is_new(compl)) {
627 		status = be_mcc_compl_process(adapter, &mbox->compl);
628 		be_mcc_compl_use(compl);
629 		if (status)
630 			return status;
631 	} else {
632 		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
633 		return -1;
634 	}
635 	return 0;
636 }
637 
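/* Read the POST (power-on self test) stage from the SLI port semaphore
 * register; its location differs between BEx and later (SH) chips
 */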
638 static u16 be_POST_stage_get(struct be_adapter *adapter)
639 {
640 	u32 sem;
641 
642 	if (BEx_chip(adapter))
643 		sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
644 	else
645 		pci_read_config_dword(adapter->pdev,
646 				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
647 
648 	return sem & POST_STAGE_MASK;
649 }
650 
651 static int lancer_wait_ready(struct be_adapter *adapter)
652 {
653 #define SLIPORT_READY_TIMEOUT 30
654 	u32 sliport_status;
655 	int i;
656 
657 	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
658 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
659 		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
660 			return 0;
661 
662 		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
663 		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
664 			return -EIO;
665 
666 		msleep(1000);
667 	}
668 
669 	return sliport_status ? : -1;
670 }
671 
672 int be_fw_wait_ready(struct be_adapter *adapter)
673 {
674 	u16 stage;
675 	int status, timeout = 0;
676 	struct device *dev = &adapter->pdev->dev;
677 
678 	if (lancer_chip(adapter)) {
679 		status = lancer_wait_ready(adapter);
680 		if (status) {
681 			stage = status;
682 			goto err;
683 		}
684 		return 0;
685 	}
686 
687 	do {
688 		/* There's no means to poll POST state on BE2/3 VFs */
689 		if (BEx_chip(adapter) && be_virtfn(adapter))
690 			return 0;
691 
692 		stage = be_POST_stage_get(adapter);
693 		if (stage == POST_STAGE_ARMFW_RDY)
694 			return 0;
695 
696 		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
697 		if (msleep_interruptible(2000)) {
698 			dev_err(dev, "Waiting for POST aborted\n");
699 			return -EINTR;
700 		}
701 		timeout += 2;
702 	} while (timeout < 60);
703 
704 err:
705 	dev_err(dev, "POST timeout; stage=%#x\n", stage);
706 	return -ETIMEDOUT;
707 }
708 
709 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
710 {
711 	return &wrb->payload.sgl[0];
712 }
713 
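/* Stash the request buffer's virtual address in the WRB tags; the FW copies
 * the tags into the completion, letting be_decode_resp_hdr() recover the
 * request from it
 */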
714 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
715 {
716 	wrb->tag0 = addr & 0xFFFFFFFF;
717 	wrb->tag1 = upper_32_bits(addr);
718 }
719 
720 /* Don't touch the hdr after it's prepared */
721 /* mem will be NULL for embedded commands */
722 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
723 				   u8 subsystem, u8 opcode, int cmd_len,
724 				   struct be_mcc_wrb *wrb,
725 				   struct be_dma_mem *mem)
726 {
727 	struct be_sge *sge;
728 
729 	req_hdr->opcode = opcode;
730 	req_hdr->subsystem = subsystem;
731 	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
732 	req_hdr->version = 0;
733 	fill_wrb_tags(wrb, (ulong) req_hdr);
734 	wrb->payload_length = cmd_len;
735 	if (mem) {
736 		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
737 			MCC_WRB_SGE_CNT_SHIFT;
738 		sge = nonembedded_sgl(wrb);
739 		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
740 		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
741 		sge->len = cpu_to_le32(mem->size);
	} else {
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	}
744 	be_dws_cpu_to_le(wrb, 8);
745 }
746 
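/* Fill in the cmd's page-address array with the 4K pages spanned by the DMA
 * buffer, up to max_pages; assumes the buffer is physically contiguous (as
 * coherent DMA allocations are)
 */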
747 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
748 				      struct be_dma_mem *mem)
749 {
750 	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
751 	u64 dma = (u64)mem->dma;
752 
753 	for (i = 0; i < buf_pages; i++) {
754 		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
755 		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
756 		dma += PAGE_SIZE_4K;
757 	}
758 }
759 
760 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
761 {
762 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
763 	struct be_mcc_wrb *wrb
764 		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
765 	memset(wrb, 0, sizeof(*wrb));
766 	return wrb;
767 }
768 
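/* Returns the next free WRB on the MCC queue (zeroed), or NULL if the queue
 * hasn't been created yet or is full; the caller must hold mcc_lock
 */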
769 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
770 {
771 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
772 	struct be_mcc_wrb *wrb;
773 
774 	if (!mccq->created)
775 		return NULL;
776 
777 	if (atomic_read(&mccq->used) >= mccq->len)
778 		return NULL;
779 
780 	wrb = queue_head_node(mccq);
781 	queue_head_inc(mccq);
782 	atomic_inc(&mccq->used);
783 	memset(wrb, 0, sizeof(*wrb));
784 	return wrb;
785 }
786 
787 static bool use_mcc(struct be_adapter *adapter)
788 {
789 	return adapter->mcc_obj.q.created;
790 }
791 
792 /* Must be used only in process context */
793 static int be_cmd_lock(struct be_adapter *adapter)
794 {
795 	if (use_mcc(adapter)) {
796 		spin_lock_bh(&adapter->mcc_lock);
797 		return 0;
798 	} else {
799 		return mutex_lock_interruptible(&adapter->mbox_lock);
800 	}
801 }
802 
803 /* Must be used only in process context */
804 static void be_cmd_unlock(struct be_adapter *adapter)
805 {
806 	if (use_mcc(adapter))
807 		spin_unlock_bh(&adapter->mcc_lock);
808 	else
809 		return mutex_unlock(&adapter->mbox_lock);
810 }
811 
812 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
813 				      struct be_mcc_wrb *wrb)
814 {
815 	struct be_mcc_wrb *dest_wrb;
816 
817 	if (use_mcc(adapter)) {
818 		dest_wrb = wrb_from_mccq(adapter);
819 		if (!dest_wrb)
820 			return NULL;
821 	} else {
822 		dest_wrb = wrb_from_mbox(adapter);
823 	}
824 
825 	memcpy(dest_wrb, wrb, sizeof(*wrb));
826 	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
827 		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
828 
829 	return dest_wrb;
830 }
831 
832 /* Must be used only in process context */
833 static int be_cmd_notify_wait(struct be_adapter *adapter,
834 			      struct be_mcc_wrb *wrb)
835 {
836 	struct be_mcc_wrb *dest_wrb;
837 	int status;
838 
839 	status = be_cmd_lock(adapter);
840 	if (status)
841 		return status;
842 
	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
857 }
858 
859 /* Tell fw we're about to start firing cmds by writing a
860  * special pattern across the wrb hdr; uses mbox
861  */
862 int be_cmd_fw_init(struct be_adapter *adapter)
863 {
864 	u8 *wrb;
865 	int status;
866 
867 	if (lancer_chip(adapter))
868 		return 0;
869 
870 	if (mutex_lock_interruptible(&adapter->mbox_lock))
871 		return -1;
872 
873 	wrb = (u8 *)wrb_from_mbox(adapter);
874 	*wrb++ = 0xFF;
875 	*wrb++ = 0x12;
876 	*wrb++ = 0x34;
877 	*wrb++ = 0xFF;
878 	*wrb++ = 0xFF;
879 	*wrb++ = 0x56;
880 	*wrb++ = 0x78;
881 	*wrb = 0xFF;
882 
883 	status = be_mbox_notify_wait(adapter);
884 
885 	mutex_unlock(&adapter->mbox_lock);
886 	return status;
887 }
888 
889 /* Tell fw we're done with firing cmds by writing a
890  * special pattern across the wrb hdr; uses mbox
891  */
892 int be_cmd_fw_clean(struct be_adapter *adapter)
893 {
894 	u8 *wrb;
895 	int status;
896 
897 	if (lancer_chip(adapter))
898 		return 0;
899 
900 	if (mutex_lock_interruptible(&adapter->mbox_lock))
901 		return -1;
902 
903 	wrb = (u8 *)wrb_from_mbox(adapter);
904 	*wrb++ = 0xFF;
905 	*wrb++ = 0xAA;
906 	*wrb++ = 0xBB;
907 	*wrb++ = 0xFF;
908 	*wrb++ = 0xFF;
909 	*wrb++ = 0xCC;
910 	*wrb++ = 0xDD;
911 	*wrb = 0xFF;
912 
913 	status = be_mbox_notify_wait(adapter);
914 
915 	mutex_unlock(&adapter->mbox_lock);
916 	return status;
917 }
918 
919 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
920 {
921 	struct be_mcc_wrb *wrb;
922 	struct be_cmd_req_eq_create *req;
923 	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
924 	int status, ver = 0;
925 
926 	if (mutex_lock_interruptible(&adapter->mbox_lock))
927 		return -1;
928 
929 	wrb = wrb_from_mbox(adapter);
930 	req = embedded_payload(wrb);
931 
932 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
933 			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
934 			       NULL);
935 
	/* Support for EQ_CREATE v2 is available only from SH-R onwards */
937 	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
938 		ver = 2;
939 
940 	req->hdr.version = ver;
941 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
942 
943 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
944 	/* 4byte eqe*/
945 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
946 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
947 		      __ilog2_u32(eqo->q.len / 256));
948 	be_dws_cpu_to_le(req->context, sizeof(req->context));
949 
950 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
951 
952 	status = be_mbox_notify_wait(adapter);
953 	if (!status) {
954 		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
955 
956 		eqo->q.id = le16_to_cpu(resp->eq_id);
957 		eqo->msix_idx =
958 			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
959 		eqo->q.created = true;
960 	}
961 
962 	mutex_unlock(&adapter->mbox_lock);
963 	return status;
964 }
965 
966 /* Use MCC */
967 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
968 			  bool permanent, u32 if_handle, u32 pmac_id)
969 {
970 	struct be_mcc_wrb *wrb;
971 	struct be_cmd_req_mac_query *req;
972 	int status;
973 
974 	spin_lock_bh(&adapter->mcc_lock);
975 
976 	wrb = wrb_from_mccq(adapter);
977 	if (!wrb) {
978 		status = -EBUSY;
979 		goto err;
980 	}
981 	req = embedded_payload(wrb);
982 
983 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
984 			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
985 			       NULL);
986 	req->type = MAC_ADDRESS_TYPE_NETWORK;
987 	if (permanent) {
988 		req->permanent = 1;
989 	} else {
990 		req->if_id = cpu_to_le16((u16)if_handle);
991 		req->pmac_id = cpu_to_le32(pmac_id);
992 		req->permanent = 0;
993 	}
994 
995 	status = be_mcc_notify_wait(adapter);
996 	if (!status) {
997 		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
998 
999 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
1000 	}
1001 
1002 err:
1003 	spin_unlock_bh(&adapter->mcc_lock);
1004 	return status;
1005 }
1006 
1007 /* Uses synchronous MCCQ */
1008 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1009 		    u32 if_id, u32 *pmac_id, u32 domain)
1010 {
1011 	struct be_mcc_wrb *wrb;
1012 	struct be_cmd_req_pmac_add *req;
1013 	int status;
1014 
1015 	spin_lock_bh(&adapter->mcc_lock);
1016 
1017 	wrb = wrb_from_mccq(adapter);
1018 	if (!wrb) {
1019 		status = -EBUSY;
1020 		goto err;
1021 	}
1022 	req = embedded_payload(wrb);
1023 
1024 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1025 			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
1026 			       NULL);
1027 
1028 	req->hdr.domain = domain;
1029 	req->if_id = cpu_to_le32(if_id);
1030 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
1031 
1032 	status = be_mcc_notify_wait(adapter);
1033 	if (!status) {
1034 		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1035 
1036 		*pmac_id = le32_to_cpu(resp->pmac_id);
1037 	}
1038 
1039 err:
1040 	spin_unlock_bh(&adapter->mcc_lock);
1041 
	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;
1044 
1045 	return status;
1046 }
1047 
1048 /* Uses synchronous MCCQ */
1049 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1050 {
1051 	struct be_mcc_wrb *wrb;
1052 	struct be_cmd_req_pmac_del *req;
1053 	int status;
1054 
1055 	if (pmac_id == -1)
1056 		return 0;
1057 
1058 	spin_lock_bh(&adapter->mcc_lock);
1059 
1060 	wrb = wrb_from_mccq(adapter);
1061 	if (!wrb) {
1062 		status = -EBUSY;
1063 		goto err;
1064 	}
1065 	req = embedded_payload(wrb);
1066 
1067 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1068 			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
1069 			       wrb, NULL);
1070 
1071 	req->hdr.domain = dom;
1072 	req->if_id = cpu_to_le32(if_id);
1073 	req->pmac_id = cpu_to_le32(pmac_id);
1074 
1075 	status = be_mcc_notify_wait(adapter);
1076 
1077 err:
1078 	spin_unlock_bh(&adapter->mcc_lock);
1079 	return status;
1080 }
1081 
1082 /* Uses Mbox */
1083 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1084 		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1085 {
1086 	struct be_mcc_wrb *wrb;
1087 	struct be_cmd_req_cq_create *req;
1088 	struct be_dma_mem *q_mem = &cq->dma_mem;
1089 	void *ctxt;
1090 	int status;
1091 
1092 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1093 		return -1;
1094 
1095 	wrb = wrb_from_mbox(adapter);
1096 	req = embedded_payload(wrb);
1097 	ctxt = &req->context;
1098 
1099 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1100 			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1101 			       NULL);
1102 
1103 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1104 
1105 	if (BEx_chip(adapter)) {
1106 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1107 			      coalesce_wm);
1108 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1109 			      ctxt, no_delay);
1110 		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1111 			      __ilog2_u32(cq->len / 256));
1112 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1113 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1114 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1115 	} else {
1116 		req->hdr.version = 2;
1117 		req->page_size = 1; /* 1 for 4K */
1118 
1119 		/* coalesce-wm field in this cmd is not relevant to Lancer.
1120 		 * Lancer uses COMMON_MODIFY_CQ to set this field
1121 		 */
1122 		if (!lancer_chip(adapter))
1123 			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1124 				      ctxt, coalesce_wm);
1125 		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1126 			      no_delay);
1127 		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1128 			      __ilog2_u32(cq->len / 256));
1129 		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1130 		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1131 		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1132 	}
1133 
1134 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1135 
1136 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1137 
1138 	status = be_mbox_notify_wait(adapter);
1139 	if (!status) {
1140 		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1141 
1142 		cq->id = le16_to_cpu(resp->cq_id);
1143 		cq->created = true;
1144 	}
1145 
1146 	mutex_unlock(&adapter->mbox_lock);
1147 
1148 	return status;
1149 }
1150 
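/* Encode a ring length (a power of 2) as log2(len) + 1, as the queue-create
 * cmds expect; a result of 16 (a 32K-entry ring) wraps to 0, presumably the
 * FW's encoding for the maximum ring size
 */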
1151 static u32 be_encoded_q_len(int q_len)
1152 {
1153 	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1154 
1155 	if (len_encoded == 16)
1156 		len_encoded = 0;
1157 	return len_encoded;
1158 }
1159 
1160 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1161 				  struct be_queue_info *mccq,
1162 				  struct be_queue_info *cq)
1163 {
1164 	struct be_mcc_wrb *wrb;
1165 	struct be_cmd_req_mcc_ext_create *req;
1166 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1167 	void *ctxt;
1168 	int status;
1169 
1170 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1171 		return -1;
1172 
1173 	wrb = wrb_from_mbox(adapter);
1174 	req = embedded_payload(wrb);
1175 	ctxt = &req->context;
1176 
1177 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1178 			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1179 			       NULL);
1180 
1181 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1182 	if (BEx_chip(adapter)) {
1183 		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1184 		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1185 			      be_encoded_q_len(mccq->len));
1186 		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1187 	} else {
1188 		req->hdr.version = 1;
1189 		req->cq_id = cpu_to_le16(cq->id);
1190 
1191 		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1192 			      be_encoded_q_len(mccq->len));
1193 		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1194 		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1195 			      ctxt, cq->id);
1196 		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1197 			      ctxt, 1);
1198 	}
1199 
	/* Subscribe to Link State, Group 5, QNQ and SLI Port async events */
1203 	req->async_event_bitmap[0] =
1204 			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1205 				    BIT(ASYNC_EVENT_CODE_GRP_5) |
1206 				    BIT(ASYNC_EVENT_CODE_QNQ) |
1207 				    BIT(ASYNC_EVENT_CODE_SLIPORT));
1208 
1209 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1210 
1211 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1212 
1213 	status = be_mbox_notify_wait(adapter);
1214 	if (!status) {
1215 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1216 
1217 		mccq->id = le16_to_cpu(resp->id);
1218 		mccq->created = true;
1219 	}
1220 	mutex_unlock(&adapter->mbox_lock);
1221 
1222 	return status;
1223 }
1224 
1225 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1226 				  struct be_queue_info *mccq,
1227 				  struct be_queue_info *cq)
1228 {
1229 	struct be_mcc_wrb *wrb;
1230 	struct be_cmd_req_mcc_create *req;
1231 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1232 	void *ctxt;
1233 	int status;
1234 
1235 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1236 		return -1;
1237 
1238 	wrb = wrb_from_mbox(adapter);
1239 	req = embedded_payload(wrb);
1240 	ctxt = &req->context;
1241 
1242 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1243 			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1244 			       NULL);
1245 
1246 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1247 
1248 	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1249 	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1250 		      be_encoded_q_len(mccq->len));
1251 	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1252 
1253 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1254 
1255 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1256 
1257 	status = be_mbox_notify_wait(adapter);
1258 	if (!status) {
1259 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1260 
1261 		mccq->id = le16_to_cpu(resp->id);
1262 		mccq->created = true;
1263 	}
1264 
1265 	mutex_unlock(&adapter->mbox_lock);
1266 	return status;
1267 }
1268 
1269 int be_cmd_mccq_create(struct be_adapter *adapter,
1270 		       struct be_queue_info *mccq, struct be_queue_info *cq)
1271 {
1272 	int status;
1273 
1274 	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1275 	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev,
			 "Upgrade to F/W ver 2.102.235.0 or newer to avoid conflicting priorities between NIC and FCoE traffic\n");
1279 		status = be_cmd_mccq_org_create(adapter, mccq, cq);
1280 	}
1281 	return status;
1282 }
1283 
1284 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1285 {
1286 	struct be_mcc_wrb wrb = {0};
1287 	struct be_cmd_req_eth_tx_create *req;
1288 	struct be_queue_info *txq = &txo->q;
1289 	struct be_queue_info *cq = &txo->cq;
1290 	struct be_dma_mem *q_mem = &txq->dma_mem;
1291 	int status, ver = 0;
1292 
1293 	req = embedded_payload(&wrb);
1294 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1295 			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1296 
1297 	if (lancer_chip(adapter)) {
1298 		req->hdr.version = 1;
1299 	} else if (BEx_chip(adapter)) {
1300 		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1301 			req->hdr.version = 2;
1302 	} else { /* For SH */
1303 		req->hdr.version = 2;
1304 	}
1305 
1306 	if (req->hdr.version > 0)
1307 		req->if_id = cpu_to_le16(adapter->if_handle);
1308 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1309 	req->ulp_num = BE_ULP1_NUM;
1310 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1311 	req->cq_id = cpu_to_le16(cq->id);
1312 	req->queue_size = be_encoded_q_len(txq->len);
1313 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1314 	ver = req->hdr.version;
1315 
1316 	status = be_cmd_notify_wait(adapter, &wrb);
1317 	if (!status) {
1318 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1319 
1320 		txq->id = le16_to_cpu(resp->cid);
1321 		if (ver == 2)
1322 			txo->db_offset = le32_to_cpu(resp->db_offset);
1323 		else
1324 			txo->db_offset = DB_TXULP1_OFFSET;
1325 		txq->created = true;
1326 	}
1327 
1328 	return status;
1329 }
1330 
1331 /* Uses MCC */
1332 int be_cmd_rxq_create(struct be_adapter *adapter,
1333 		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1334 		      u32 if_id, u32 rss, u8 *rss_id)
1335 {
1336 	struct be_mcc_wrb *wrb;
1337 	struct be_cmd_req_eth_rx_create *req;
1338 	struct be_dma_mem *q_mem = &rxq->dma_mem;
1339 	int status;
1340 
1341 	spin_lock_bh(&adapter->mcc_lock);
1342 
1343 	wrb = wrb_from_mccq(adapter);
1344 	if (!wrb) {
1345 		status = -EBUSY;
1346 		goto err;
1347 	}
1348 	req = embedded_payload(wrb);
1349 
1350 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1351 			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1352 
1353 	req->cq_id = cpu_to_le16(cq_id);
1354 	req->frag_size = fls(frag_size) - 1;
1355 	req->num_pages = 2;
1356 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1357 	req->interface_id = cpu_to_le32(if_id);
1358 	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1359 	req->rss_queue = cpu_to_le32(rss);
1360 
1361 	status = be_mcc_notify_wait(adapter);
1362 	if (!status) {
1363 		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1364 
1365 		rxq->id = le16_to_cpu(resp->id);
1366 		rxq->created = true;
1367 		*rss_id = resp->rss_id;
1368 	}
1369 
1370 err:
1371 	spin_unlock_bh(&adapter->mcc_lock);
1372 	return status;
1373 }
1374 
1375 /* Generic destroyer function for all types of queues
1376  * Uses Mbox
1377  */
1378 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1379 		     int queue_type)
1380 {
1381 	struct be_mcc_wrb *wrb;
1382 	struct be_cmd_req_q_destroy *req;
1383 	u8 subsys = 0, opcode = 0;
1384 	int status;
1385 
1386 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1387 		return -1;
1388 
1389 	wrb = wrb_from_mbox(adapter);
1390 	req = embedded_payload(wrb);
1391 
1392 	switch (queue_type) {
1393 	case QTYPE_EQ:
1394 		subsys = CMD_SUBSYSTEM_COMMON;
1395 		opcode = OPCODE_COMMON_EQ_DESTROY;
1396 		break;
1397 	case QTYPE_CQ:
1398 		subsys = CMD_SUBSYSTEM_COMMON;
1399 		opcode = OPCODE_COMMON_CQ_DESTROY;
1400 		break;
1401 	case QTYPE_TXQ:
1402 		subsys = CMD_SUBSYSTEM_ETH;
1403 		opcode = OPCODE_ETH_TX_DESTROY;
1404 		break;
1405 	case QTYPE_RXQ:
1406 		subsys = CMD_SUBSYSTEM_ETH;
1407 		opcode = OPCODE_ETH_RX_DESTROY;
1408 		break;
1409 	case QTYPE_MCCQ:
1410 		subsys = CMD_SUBSYSTEM_COMMON;
1411 		opcode = OPCODE_COMMON_MCC_DESTROY;
1412 		break;
1413 	default:
1414 		BUG();
1415 	}
1416 
1417 	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1418 			       NULL);
1419 	req->id = cpu_to_le16(q->id);
1420 
1421 	status = be_mbox_notify_wait(adapter);
1422 	q->created = false;
1423 
1424 	mutex_unlock(&adapter->mbox_lock);
1425 	return status;
1426 }
1427 
1428 /* Uses MCC */
1429 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1430 {
1431 	struct be_mcc_wrb *wrb;
1432 	struct be_cmd_req_q_destroy *req;
1433 	int status;
1434 
1435 	spin_lock_bh(&adapter->mcc_lock);
1436 
1437 	wrb = wrb_from_mccq(adapter);
1438 	if (!wrb) {
1439 		status = -EBUSY;
1440 		goto err;
1441 	}
1442 	req = embedded_payload(wrb);
1443 
1444 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1445 			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1446 	req->id = cpu_to_le16(q->id);
1447 
1448 	status = be_mcc_notify_wait(adapter);
1449 	q->created = false;
1450 
1451 err:
1452 	spin_unlock_bh(&adapter->mcc_lock);
1453 	return status;
1454 }
1455 
1456 /* Create an rx filtering policy configuration on an i/f
1457  * Will use MBOX only if MCCQ has not been created.
1458  */
1459 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1460 		     u32 *if_handle, u32 domain)
1461 {
1462 	struct be_mcc_wrb wrb = {0};
1463 	struct be_cmd_req_if_create *req;
1464 	int status;
1465 
1466 	req = embedded_payload(&wrb);
1467 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1468 			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1469 			       sizeof(*req), &wrb, NULL);
1470 	req->hdr.domain = domain;
1471 	req->capability_flags = cpu_to_le32(cap_flags);
1472 	req->enable_flags = cpu_to_le32(en_flags);
1473 	req->pmac_invalid = true;
1474 
1475 	status = be_cmd_notify_wait(adapter, &wrb);
1476 	if (!status) {
1477 		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1478 
1479 		*if_handle = le32_to_cpu(resp->interface_id);
1480 
1481 		/* Hack to retrieve VF's pmac-id on BE3 */
1482 		if (BE3_chip(adapter) && be_virtfn(adapter))
1483 			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1484 	}
1485 	return status;
1486 }
1487 
1488 /* Uses MCCQ */
1489 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1490 {
1491 	struct be_mcc_wrb *wrb;
1492 	struct be_cmd_req_if_destroy *req;
1493 	int status;
1494 
1495 	if (interface_id == -1)
1496 		return 0;
1497 
1498 	spin_lock_bh(&adapter->mcc_lock);
1499 
1500 	wrb = wrb_from_mccq(adapter);
1501 	if (!wrb) {
1502 		status = -EBUSY;
1503 		goto err;
1504 	}
1505 	req = embedded_payload(wrb);
1506 
1507 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1508 			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1509 			       sizeof(*req), wrb, NULL);
1510 	req->hdr.domain = domain;
1511 	req->interface_id = cpu_to_le32(interface_id);
1512 
1513 	status = be_mcc_notify_wait(adapter);
1514 err:
1515 	spin_unlock_bh(&adapter->mcc_lock);
1516 	return status;
1517 }
1518 
/* Get stats is a non-embedded command: the request is not embedded inside the
 * WRB but is a separate DMA memory block.
 * Uses asynchronous MCC.
 */
1523 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1524 {
1525 	struct be_mcc_wrb *wrb;
1526 	struct be_cmd_req_hdr *hdr;
1527 	int status = 0;
1528 
1529 	spin_lock_bh(&adapter->mcc_lock);
1530 
1531 	wrb = wrb_from_mccq(adapter);
1532 	if (!wrb) {
1533 		status = -EBUSY;
1534 		goto err;
1535 	}
1536 	hdr = nonemb_cmd->va;
1537 
1538 	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1539 			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1540 			       nonemb_cmd);
1541 
	/* BE2 supports only v0 of the cmd; BE3 and Lancer support v1;
	 * SH onwards support v2
	 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;
1549 
1550 	be_mcc_notify(adapter);
1551 	adapter->stats_cmd_sent = true;
1552 
1553 err:
1554 	spin_unlock_bh(&adapter->mcc_lock);
1555 	return status;
1556 }
1557 
1558 /* Lancer Stats */
1559 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1560 			       struct be_dma_mem *nonemb_cmd)
1561 {
1562 	struct be_mcc_wrb *wrb;
1563 	struct lancer_cmd_req_pport_stats *req;
1564 	int status = 0;
1565 
1566 	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1567 			    CMD_SUBSYSTEM_ETH))
1568 		return -EPERM;
1569 
1570 	spin_lock_bh(&adapter->mcc_lock);
1571 
1572 	wrb = wrb_from_mccq(adapter);
1573 	if (!wrb) {
1574 		status = -EBUSY;
1575 		goto err;
1576 	}
1577 	req = nonemb_cmd->va;
1578 
1579 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1580 			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1581 			       wrb, nonemb_cmd);
1582 
1583 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1584 	req->cmd_params.params.reset_stats = 0;
1585 
1586 	be_mcc_notify(adapter);
1587 	adapter->stats_cmd_sent = true;
1588 
1589 err:
1590 	spin_unlock_bh(&adapter->mcc_lock);
1591 	return status;
1592 }
1593 
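/* Convert the FW's PHY_LINK_SPEED_* encoding to a speed in Mbps; unknown
 * values map to 0
 */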
1594 static int be_mac_to_link_speed(int mac_speed)
1595 {
1596 	switch (mac_speed) {
1597 	case PHY_LINK_SPEED_ZERO:
1598 		return 0;
1599 	case PHY_LINK_SPEED_10MBPS:
1600 		return 10;
1601 	case PHY_LINK_SPEED_100MBPS:
1602 		return 100;
1603 	case PHY_LINK_SPEED_1GBPS:
1604 		return 1000;
1605 	case PHY_LINK_SPEED_10GBPS:
1606 		return 10000;
1607 	case PHY_LINK_SPEED_20GBPS:
1608 		return 20000;
1609 	case PHY_LINK_SPEED_25GBPS:
1610 		return 25000;
1611 	case PHY_LINK_SPEED_40GBPS:
1612 		return 40000;
1613 	}
1614 	return 0;
1615 }
1616 
1617 /* Uses synchronous mcc
1618  * Returns link_speed in Mbps
1619  */
1620 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1621 			     u8 *link_status, u32 dom)
1622 {
1623 	struct be_mcc_wrb *wrb;
1624 	struct be_cmd_req_link_status *req;
1625 	int status;
1626 
1627 	spin_lock_bh(&adapter->mcc_lock);
1628 
1629 	if (link_status)
1630 		*link_status = LINK_DOWN;
1631 
1632 	wrb = wrb_from_mccq(adapter);
1633 	if (!wrb) {
1634 		status = -EBUSY;
1635 		goto err;
1636 	}
1637 	req = embedded_payload(wrb);
1638 
1639 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1640 			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1641 			       sizeof(*req), wrb, NULL);
1642 
	/* version 1 of the cmd is supported by all chips except BE2 */
1644 	if (!BE2_chip(adapter))
1645 		req->hdr.version = 1;
1646 
1647 	req->hdr.domain = dom;
1648 
1649 	status = be_mcc_notify_wait(adapter);
1650 	if (!status) {
1651 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1652 
1653 		if (link_speed) {
1654 			*link_speed = resp->link_speed ?
1655 				      le16_to_cpu(resp->link_speed) * 10 :
1656 				      be_mac_to_link_speed(resp->mac_speed);
1657 
1658 			if (!resp->logical_link_status)
1659 				*link_speed = 0;
1660 		}
1661 		if (link_status)
1662 			*link_status = resp->logical_link_status;
1663 	}
1664 
1665 err:
1666 	spin_unlock_bh(&adapter->mcc_lock);
1667 	return status;
1668 }
1669 
/* Uses asynchronous mcc; the completion is consumed in
 * be_async_cmd_process()
 */
1671 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1672 {
1673 	struct be_mcc_wrb *wrb;
1674 	struct be_cmd_req_get_cntl_addnl_attribs *req;
1675 	int status = 0;
1676 
1677 	spin_lock_bh(&adapter->mcc_lock);
1678 
1679 	wrb = wrb_from_mccq(adapter);
1680 	if (!wrb) {
1681 		status = -EBUSY;
1682 		goto err;
1683 	}
1684 	req = embedded_payload(wrb);
1685 
1686 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1687 			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1688 			       sizeof(*req), wrb, NULL);
1689 
1690 	be_mcc_notify(adapter);
1691 
1692 err:
1693 	spin_unlock_bh(&adapter->mcc_lock);
1694 	return status;
1695 }
1696 
1697 /* Uses synchronous mcc */
1698 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1699 {
1700 	struct be_mcc_wrb *wrb;
1701 	struct be_cmd_req_get_fat *req;
1702 	int status;
1703 
1704 	spin_lock_bh(&adapter->mcc_lock);
1705 
1706 	wrb = wrb_from_mccq(adapter);
1707 	if (!wrb) {
1708 		status = -EBUSY;
1709 		goto err;
1710 	}
1711 	req = embedded_payload(wrb);
1712 
1713 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1714 			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1715 			       NULL);
1716 	req->fat_operation = cpu_to_le32(QUERY_FAT);
1717 	status = be_mcc_notify_wait(adapter);
1718 	if (!status) {
1719 		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1720 
1721 		if (log_size && resp->log_size)
1722 			*log_size = le32_to_cpu(resp->log_size) -
1723 					sizeof(u32);
1724 	}
1725 err:
1726 	spin_unlock_bh(&adapter->mcc_lock);
1727 	return status;
1728 }
1729 
1730 int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1731 {
1732 	struct be_dma_mem get_fat_cmd;
1733 	struct be_mcc_wrb *wrb;
1734 	struct be_cmd_req_get_fat *req;
1735 	u32 offset = 0, total_size, buf_size,
1736 				log_offset = sizeof(u32), payload_len;
1737 	int status = 0;
1738 
1739 	if (buf_len == 0)
1740 		return -EIO;
1741 
1742 	total_size = buf_len;
1743 
	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024;
1745 	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1746 					     get_fat_cmd.size,
1747 					     &get_fat_cmd.dma, GFP_ATOMIC);
1748 	if (!get_fat_cmd.va) {
1749 		dev_err(&adapter->pdev->dev,
1750 			"Memory allocation failure while reading FAT data\n");
1751 		return -ENOMEM;
1752 	}
1753 
1754 	spin_lock_bh(&adapter->mcc_lock);
1755 
1756 	while (total_size) {
		buf_size = min(total_size, (u32)(60 * 1024));
1758 		total_size -= buf_size;
1759 
1760 		wrb = wrb_from_mccq(adapter);
1761 		if (!wrb) {
1762 			status = -EBUSY;
1763 			goto err;
1764 		}
1765 		req = get_fat_cmd.va;
1766 
1767 		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1768 		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1769 				       OPCODE_COMMON_MANAGE_FAT, payload_len,
1770 				       wrb, &get_fat_cmd);
1771 
1772 		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1773 		req->read_log_offset = cpu_to_le32(log_offset);
1774 		req->read_log_length = cpu_to_le32(buf_size);
1775 		req->data_buffer_size = cpu_to_le32(buf_size);
1776 
1777 		status = be_mcc_notify_wait(adapter);
1778 		if (!status) {
1779 			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1780 
1781 			memcpy(buf + offset,
1782 			       resp->data_buffer,
1783 			       le32_to_cpu(resp->read_log_length));
1784 		} else {
1785 			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1786 			goto err;
1787 		}
1788 		offset += buf_size;
1789 		log_offset += buf_size;
1790 	}
1791 err:
1792 	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1793 			  get_fat_cmd.va, get_fat_cmd.dma);
1794 	spin_unlock_bh(&adapter->mcc_lock);
1795 	return status;
1796 }
1797 
1798 /* Uses synchronous mcc */
1799 int be_cmd_get_fw_ver(struct be_adapter *adapter)
1800 {
1801 	struct be_mcc_wrb *wrb;
1802 	struct be_cmd_req_get_fw_version *req;
1803 	int status;
1804 
1805 	spin_lock_bh(&adapter->mcc_lock);
1806 
1807 	wrb = wrb_from_mccq(adapter);
1808 	if (!wrb) {
1809 		status = -EBUSY;
1810 		goto err;
1811 	}
1812 
1813 	req = embedded_payload(wrb);
1814 
1815 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1816 			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1817 			       NULL);
1818 	status = be_mcc_notify_wait(adapter);
1819 	if (!status) {
1820 		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1821 
1822 		strlcpy(adapter->fw_ver, resp->firmware_version_string,
1823 			sizeof(adapter->fw_ver));
1824 		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1825 			sizeof(adapter->fw_on_flash));
1826 	}
1827 err:
1828 	spin_unlock_bh(&adapter->mcc_lock);
1829 	return status;
1830 }
1831 
1832 /* set the EQ delay interval of an EQ to specified value
1833  * Uses async mcc
1834  */
1835 static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1836 			       struct be_set_eqd *set_eqd, int num)
1837 {
1838 	struct be_mcc_wrb *wrb;
1839 	struct be_cmd_req_modify_eq_delay *req;
1840 	int status = 0, i;
1841 
1842 	spin_lock_bh(&adapter->mcc_lock);
1843 
1844 	wrb = wrb_from_mccq(adapter);
1845 	if (!wrb) {
1846 		status = -EBUSY;
1847 		goto err;
1848 	}
1849 	req = embedded_payload(wrb);
1850 
1851 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1852 			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1853 			       NULL);
1854 
1855 	req->num_eq = cpu_to_le32(num);
1856 	for (i = 0; i < num; i++) {
1857 		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1858 		req->set_eqd[i].phase = 0;
1859 		req->set_eqd[i].delay_multiplier =
1860 				cpu_to_le32(set_eqd[i].delay_multiplier);
1861 	}
1862 
1863 	be_mcc_notify(adapter);
1864 err:
1865 	spin_unlock_bh(&adapter->mcc_lock);
1866 	return status;
1867 }
1868 
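/* Issue the MODIFY_EQ_DELAY cmd in chunks, as at most 8 EQs can be updated
 * per cmd (the per-cmd limit applied below)
 */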
1869 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1870 		      int num)
1871 {
1872 	int num_eqs, i = 0;
1873 
1874 	while (num) {
1875 		num_eqs = min(num, 8);
1876 		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1877 		i += num_eqs;
1878 		num -= num_eqs;
1879 	}
1880 
1881 	return 0;
1882 }
1883 
/* Uses synchronous mcc */
1885 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1886 		       u32 num, u32 domain)
1887 {
1888 	struct be_mcc_wrb *wrb;
1889 	struct be_cmd_req_vlan_config *req;
1890 	int status;
1891 
1892 	spin_lock_bh(&adapter->mcc_lock);
1893 
1894 	wrb = wrb_from_mccq(adapter);
1895 	if (!wrb) {
1896 		status = -EBUSY;
1897 		goto err;
1898 	}
1899 	req = embedded_payload(wrb);
1900 
1901 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1902 			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1903 			       wrb, NULL);
1904 	req->hdr.domain = domain;
1905 
1906 	req->interface_id = if_id;
1907 	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1908 	req->num_vlan = num;
1909 	memcpy(req->normal_vlan, vtag_array,
1910 	       req->num_vlan * sizeof(vtag_array[0]));
1911 
1912 	status = be_mcc_notify_wait(adapter);
1913 err:
1914 	spin_unlock_bh(&adapter->mcc_lock);
1915 	return status;
1916 }
1917 
1918 static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1919 {
1920 	struct be_mcc_wrb *wrb;
1921 	struct be_dma_mem *mem = &adapter->rx_filter;
1922 	struct be_cmd_req_rx_filter *req = mem->va;
1923 	int status;
1924 
1925 	spin_lock_bh(&adapter->mcc_lock);
1926 
1927 	wrb = wrb_from_mccq(adapter);
1928 	if (!wrb) {
1929 		status = -EBUSY;
1930 		goto err;
1931 	}
1932 	memset(req, 0, sizeof(*req));
1933 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1934 			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1935 			       wrb, mem);
1936 
1937 	req->if_id = cpu_to_le32(adapter->if_handle);
1938 	req->if_flags_mask = cpu_to_le32(flags);
1939 	req->if_flags = (value == ON) ? req->if_flags_mask : 0;
1940 
1941 	if (flags & BE_IF_FLAGS_MULTICAST) {
1942 		struct netdev_hw_addr *ha;
1943 		int i = 0;
1944 
		/* Reset mcast promisc mode if it is already set: set the bit
		 * in the mask but leave it clear in the flags field
		 */
1948 		req->if_flags_mask |=
1949 			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1950 				    be_if_cap_flags(adapter));
1951 		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1952 		netdev_for_each_mc_addr(ha, adapter->netdev)
1953 			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1954 	}
1955 
1956 	status = be_mcc_notify_wait(adapter);
1957 err:
1958 	spin_unlock_bh(&adapter->mcc_lock);
1959 	return status;
1960 }
1961 
1962 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1963 {
1964 	struct device *dev = &adapter->pdev->dev;
1965 
1966 	if ((flags & be_if_cap_flags(adapter)) != flags) {
1967 		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
1968 		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
1969 			 be_if_cap_flags(adapter));
1970 	}
1971 	flags &= be_if_cap_flags(adapter);
1972 
1973 	return __be_cmd_rx_filter(adapter, flags, value);
1974 }
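
/* Illustrative usage (a sketch, not from this file): enabling promiscuous
 * mode passes the flag with value == ON, so it is set in both the mask and
 * the flags field, while flags the interface is not capable of are warned
 * about and masked off above:
 *
 *	be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
 */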
1975 
/* Uses synchronous mcc */
1977 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1978 {
1979 	struct be_mcc_wrb *wrb;
1980 	struct be_cmd_req_set_flow_control *req;
1981 	int status;
1982 
1983 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1984 			    CMD_SUBSYSTEM_COMMON))
1985 		return -EPERM;
1986 
1987 	spin_lock_bh(&adapter->mcc_lock);
1988 
1989 	wrb = wrb_from_mccq(adapter);
1990 	if (!wrb) {
1991 		status = -EBUSY;
1992 		goto err;
1993 	}
1994 	req = embedded_payload(wrb);
1995 
1996 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1997 			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
1998 			       wrb, NULL);
1999 
2000 	req->hdr.version = 1;
2001 	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
2002 	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
2003 
2004 	status = be_mcc_notify_wait(adapter);
2005 
2006 err:
2007 	spin_unlock_bh(&adapter->mcc_lock);
2008 
2009 	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
		return -EOPNOTSUPP;
2011 
2012 	return status;
2013 }
2014 
/* Uses sync mcc */
2016 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
2017 {
2018 	struct be_mcc_wrb *wrb;
2019 	struct be_cmd_req_get_flow_control *req;
2020 	int status;
2021 
2022 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2023 			    CMD_SUBSYSTEM_COMMON))
2024 		return -EPERM;
2025 
2026 	spin_lock_bh(&adapter->mcc_lock);
2027 
2028 	wrb = wrb_from_mccq(adapter);
2029 	if (!wrb) {
2030 		status = -EBUSY;
2031 		goto err;
2032 	}
2033 	req = embedded_payload(wrb);
2034 
2035 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2036 			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2037 			       wrb, NULL);
2038 
2039 	status = be_mcc_notify_wait(adapter);
2040 	if (!status) {
2041 		struct be_cmd_resp_get_flow_control *resp =
2042 						embedded_payload(wrb);
2043 
2044 		*tx_fc = le16_to_cpu(resp->tx_flow_control);
2045 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
2046 	}
2047 
2048 err:
2049 	spin_unlock_bh(&adapter->mcc_lock);
2050 	return status;
2051 }
2052 
2053 /* Uses mbox */
2054 int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2055 {
2056 	struct be_mcc_wrb *wrb;
2057 	struct be_cmd_req_query_fw_cfg *req;
2058 	int status;
2059 
2060 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2061 		return -1;
2062 
2063 	wrb = wrb_from_mbox(adapter);
2064 	req = embedded_payload(wrb);
2065 
2066 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2067 			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2068 			       sizeof(*req), wrb, NULL);
2069 
2070 	status = be_mbox_notify_wait(adapter);
2071 	if (!status) {
2072 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2073 
2074 		adapter->port_num = le32_to_cpu(resp->phys_port);
2075 		adapter->function_mode = le32_to_cpu(resp->function_mode);
2076 		adapter->function_caps = le32_to_cpu(resp->function_caps);
2077 		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2078 		dev_info(&adapter->pdev->dev,
2079 			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2080 			 adapter->function_mode, adapter->function_caps);
2081 	}
2082 
2083 	mutex_unlock(&adapter->mbox_lock);
2084 	return status;
2085 }
2086 
2087 /* Uses mbox */
2088 int be_cmd_reset_function(struct be_adapter *adapter)
2089 {
2090 	struct be_mcc_wrb *wrb;
2091 	struct be_cmd_req_hdr *req;
2092 	int status;
2093 
2094 	if (lancer_chip(adapter)) {
2095 		iowrite32(SLI_PORT_CONTROL_IP_MASK,
2096 			  adapter->db + SLIPORT_CONTROL_OFFSET);
2097 		status = lancer_wait_ready(adapter);
2098 		if (status)
2099 			dev_err(&adapter->pdev->dev,
				"Adapter in non-recoverable error\n");
2101 		return status;
2102 	}
2103 
2104 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2105 		return -1;
2106 
2107 	wrb = wrb_from_mbox(adapter);
2108 	req = embedded_payload(wrb);
2109 
2110 	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2111 			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2112 			       NULL);
2113 
2114 	status = be_mbox_notify_wait(adapter);
2115 
2116 	mutex_unlock(&adapter->mbox_lock);
2117 	return status;
2118 }
2119 
2120 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2121 		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2122 {
2123 	struct be_mcc_wrb *wrb;
2124 	struct be_cmd_req_rss_config *req;
2125 	int status;
2126 
2127 	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2128 		return 0;
2129 
2130 	spin_lock_bh(&adapter->mcc_lock);
2131 
2132 	wrb = wrb_from_mccq(adapter);
2133 	if (!wrb) {
2134 		status = -EBUSY;
2135 		goto err;
2136 	}
2137 	req = embedded_payload(wrb);
2138 
2139 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2140 			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2141 
2142 	req->if_id = cpu_to_le32(adapter->if_handle);
2143 	req->enable_rss = cpu_to_le16(rss_hash_opts);
2144 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2145 
2146 	if (!BEx_chip(adapter))
2147 		req->hdr.version = 1;
2148 
2149 	memcpy(req->cpu_table, rsstable, table_size);
2150 	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2151 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2152 
2153 	status = be_mcc_notify_wait(adapter);
2154 err:
2155 	spin_unlock_bh(&adapter->mcc_lock);
2156 	return status;
2157 }
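
/* Note on the encoding above: the RSS indirection table size is sent as a
 * log2 value; e.g. a 128-entry table gives
 * cpu_table_size_log2 = fls(128) - 1 = 7.
 */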
2158 
2159 /* Uses sync mcc */
2160 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2161 			    u8 bcn, u8 sts, u8 state)
2162 {
2163 	struct be_mcc_wrb *wrb;
2164 	struct be_cmd_req_enable_disable_beacon *req;
2165 	int status;
2166 
2167 	spin_lock_bh(&adapter->mcc_lock);
2168 
2169 	wrb = wrb_from_mccq(adapter);
2170 	if (!wrb) {
2171 		status = -EBUSY;
2172 		goto err;
2173 	}
2174 	req = embedded_payload(wrb);
2175 
2176 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2177 			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2178 			       sizeof(*req), wrb, NULL);
2179 
2180 	req->port_num = port_num;
2181 	req->beacon_state = state;
2182 	req->beacon_duration = bcn;
2183 	req->status_duration = sts;
2184 
2185 	status = be_mcc_notify_wait(adapter);
2186 
2187 err:
2188 	spin_unlock_bh(&adapter->mcc_lock);
2189 	return status;
2190 }
2191 
2192 /* Uses sync mcc */
2193 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2194 {
2195 	struct be_mcc_wrb *wrb;
2196 	struct be_cmd_req_get_beacon_state *req;
2197 	int status;
2198 
2199 	spin_lock_bh(&adapter->mcc_lock);
2200 
2201 	wrb = wrb_from_mccq(adapter);
2202 	if (!wrb) {
2203 		status = -EBUSY;
2204 		goto err;
2205 	}
2206 	req = embedded_payload(wrb);
2207 
2208 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2209 			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2210 			       wrb, NULL);
2211 
2212 	req->port_num = port_num;
2213 
2214 	status = be_mcc_notify_wait(adapter);
2215 	if (!status) {
2216 		struct be_cmd_resp_get_beacon_state *resp =
2217 						embedded_payload(wrb);
2218 
2219 		*state = resp->beacon_state;
2220 	}
2221 
2222 err:
2223 	spin_unlock_bh(&adapter->mcc_lock);
2224 	return status;
2225 }
2226 
2227 /* Uses sync mcc */
2228 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2229 				      u8 page_num, u8 *data)
2230 {
2231 	struct be_dma_mem cmd;
2232 	struct be_mcc_wrb *wrb;
2233 	struct be_cmd_req_port_type *req;
2234 	int status;
2235 
2236 	if (page_num > TR_PAGE_A2)
2237 		return -EINVAL;
2238 
2239 	cmd.size = sizeof(struct be_cmd_resp_port_type);
2240 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2241 				     GFP_ATOMIC);
2242 	if (!cmd.va) {
2243 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2244 		return -ENOMEM;
2245 	}
2246 
2247 	spin_lock_bh(&adapter->mcc_lock);
2248 
2249 	wrb = wrb_from_mccq(adapter);
2250 	if (!wrb) {
2251 		status = -EBUSY;
2252 		goto err;
2253 	}
2254 	req = cmd.va;
2255 
2256 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2257 			       OPCODE_COMMON_READ_TRANSRECV_DATA,
2258 			       cmd.size, wrb, &cmd);
2259 
2260 	req->port = cpu_to_le32(adapter->hba_port_num);
2261 	req->page_num = cpu_to_le32(page_num);
2262 	status = be_mcc_notify_wait(adapter);
2263 	if (!status) {
2264 		struct be_cmd_resp_port_type *resp = cmd.va;
2265 
2266 		memcpy(data, resp->page_data, PAGE_DATA_LEN);
2267 	}
2268 err:
2269 	spin_unlock_bh(&adapter->mcc_lock);
2270 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2271 	return status;
2272 }
2273 
2274 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2275 			    u32 data_size, u32 data_offset,
2276 			    const char *obj_name, u32 *data_written,
2277 			    u8 *change_status, u8 *addn_status)
2278 {
2279 	struct be_mcc_wrb *wrb;
2280 	struct lancer_cmd_req_write_object *req;
2281 	struct lancer_cmd_resp_write_object *resp;
2282 	void *ctxt = NULL;
2283 	int status;
2284 
2285 	spin_lock_bh(&adapter->mcc_lock);
2286 	adapter->flash_status = 0;
2287 
2288 	wrb = wrb_from_mccq(adapter);
2289 	if (!wrb) {
2290 		status = -EBUSY;
2291 		goto err_unlock;
2292 	}
2293 
2294 	req = embedded_payload(wrb);
2295 
2296 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2297 			       OPCODE_COMMON_WRITE_OBJECT,
2298 			       sizeof(struct lancer_cmd_req_write_object), wrb,
2299 			       NULL);
2300 
2301 	ctxt = &req->context;
2302 	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2303 		      write_length, ctxt, data_size);
2304 
2305 	if (data_size == 0)
2306 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2307 			      eof, ctxt, 1);
2308 	else
2309 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2310 			      eof, ctxt, 0);
2311 
2312 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
2313 	req->write_offset = cpu_to_le32(data_offset);
2314 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2315 	req->descriptor_count = cpu_to_le32(1);
2316 	req->buf_len = cpu_to_le32(data_size);
2317 	req->addr_low = cpu_to_le32((cmd->dma +
2318 				     sizeof(struct lancer_cmd_req_write_object))
2319 				    & 0xFFFFFFFF);
2320 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2321 				sizeof(struct lancer_cmd_req_write_object)));
2322 
2323 	be_mcc_notify(adapter);
2324 	spin_unlock_bh(&adapter->mcc_lock);
2325 
2326 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2327 					 msecs_to_jiffies(60000)))
2328 		status = -ETIMEDOUT;
2329 	else
2330 		status = adapter->flash_status;
2331 
2332 	resp = embedded_payload(wrb);
2333 	if (!status) {
2334 		*data_written = le32_to_cpu(resp->actual_write_len);
2335 		*change_status = resp->change_status;
2336 	} else {
2337 		*addn_status = resp->additional_status;
2338 	}
2339 
2340 	return status;
2341 
2342 err_unlock:
2343 	spin_unlock_bh(&adapter->mcc_lock);
2344 	return status;
2345 }
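
/* Flash images are streamed through lancer_cmd_write_object() in chunks;
 * a zero-length write sets the EOF bit in the request context. A caller
 * sketch (the chunk size and buffer names are hypothetical):
 *
 *	while (left) {
 *		chunk = min(left, 32 * 1024U);
 *		memcpy(flash_cmd.va +
 *		       sizeof(struct lancer_cmd_req_write_object),
 *		       fw_data + offset, chunk);
 *		lancer_cmd_write_object(adapter, &flash_cmd, chunk, offset,
 *					obj_name, &written, &change, &addn);
 *		offset += written;
 *		left -= written;
 *	}
 *	// terminate with a zero-length write to flag EOF
 *	lancer_cmd_write_object(adapter, &flash_cmd, 0, offset, obj_name,
 *				&written, &change, &addn);
 */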
2346 
2347 int be_cmd_query_cable_type(struct be_adapter *adapter)
2348 {
2349 	u8 page_data[PAGE_DATA_LEN];
2350 	int status;
2351 
2352 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2353 						   page_data);
2354 	if (!status) {
2355 		switch (adapter->phy.interface_type) {
2356 		case PHY_TYPE_QSFP:
2357 			adapter->phy.cable_type =
2358 				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2359 			break;
2360 		case PHY_TYPE_SFP_PLUS_10GB:
2361 			adapter->phy.cable_type =
2362 				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2363 			break;
2364 		default:
2365 			adapter->phy.cable_type = 0;
2366 			break;
2367 		}
2368 	}
2369 	return status;
2370 }
2371 
2372 int be_cmd_query_sfp_info(struct be_adapter *adapter)
2373 {
2374 	u8 page_data[PAGE_DATA_LEN];
2375 	int status;
2376 
2377 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2378 						   page_data);
2379 	if (!status) {
2380 		strlcpy(adapter->phy.vendor_name, page_data +
2381 			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2382 		strlcpy(adapter->phy.vendor_pn,
2383 			page_data + SFP_VENDOR_PN_OFFSET,
2384 			SFP_VENDOR_NAME_LEN - 1);
2385 	}
2386 
2387 	return status;
2388 }
2389 
2390 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2391 {
2392 	struct lancer_cmd_req_delete_object *req;
2393 	struct be_mcc_wrb *wrb;
2394 	int status;
2395 
2396 	spin_lock_bh(&adapter->mcc_lock);
2397 
2398 	wrb = wrb_from_mccq(adapter);
2399 	if (!wrb) {
2400 		status = -EBUSY;
2401 		goto err;
2402 	}
2403 
2404 	req = embedded_payload(wrb);
2405 
2406 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2407 			       OPCODE_COMMON_DELETE_OBJECT,
2408 			       sizeof(*req), wrb, NULL);
2409 
2410 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2411 
2412 	status = be_mcc_notify_wait(adapter);
2413 err:
2414 	spin_unlock_bh(&adapter->mcc_lock);
2415 	return status;
2416 }
2417 
2418 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2419 			   u32 data_size, u32 data_offset, const char *obj_name,
2420 			   u32 *data_read, u32 *eof, u8 *addn_status)
2421 {
2422 	struct be_mcc_wrb *wrb;
2423 	struct lancer_cmd_req_read_object *req;
2424 	struct lancer_cmd_resp_read_object *resp;
2425 	int status;
2426 
2427 	spin_lock_bh(&adapter->mcc_lock);
2428 
2429 	wrb = wrb_from_mccq(adapter);
2430 	if (!wrb) {
2431 		status = -EBUSY;
2432 		goto err_unlock;
2433 	}
2434 
2435 	req = embedded_payload(wrb);
2436 
2437 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2438 			       OPCODE_COMMON_READ_OBJECT,
2439 			       sizeof(struct lancer_cmd_req_read_object), wrb,
2440 			       NULL);
2441 
2442 	req->desired_read_len = cpu_to_le32(data_size);
2443 	req->read_offset = cpu_to_le32(data_offset);
	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2445 	req->descriptor_count = cpu_to_le32(1);
2446 	req->buf_len = cpu_to_le32(data_size);
2447 	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2448 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2449 
2450 	status = be_mcc_notify_wait(adapter);
2451 
2452 	resp = embedded_payload(wrb);
2453 	if (!status) {
2454 		*data_read = le32_to_cpu(resp->actual_read_len);
2455 		*eof = le32_to_cpu(resp->eof);
2456 	} else {
2457 		*addn_status = resp->additional_status;
2458 	}
2459 
2460 err_unlock:
2461 	spin_unlock_bh(&adapter->mcc_lock);
2462 	return status;
2463 }
2464 
2465 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2466 			  u32 flash_type, u32 flash_opcode, u32 img_offset,
2467 			  u32 buf_size)
2468 {
2469 	struct be_mcc_wrb *wrb;
2470 	struct be_cmd_write_flashrom *req;
2471 	int status;
2472 
2473 	spin_lock_bh(&adapter->mcc_lock);
2474 	adapter->flash_status = 0;
2475 
2476 	wrb = wrb_from_mccq(adapter);
2477 	if (!wrb) {
2478 		status = -EBUSY;
2479 		goto err_unlock;
2480 	}
2481 	req = cmd->va;
2482 
2483 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2484 			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2485 			       cmd);
2486 
2487 	req->params.op_type = cpu_to_le32(flash_type);
2488 	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2489 		req->params.offset = cpu_to_le32(img_offset);
2490 
2491 	req->params.op_code = cpu_to_le32(flash_opcode);
2492 	req->params.data_buf_size = cpu_to_le32(buf_size);
2493 
2494 	be_mcc_notify(adapter);
2495 	spin_unlock_bh(&adapter->mcc_lock);
2496 
2497 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2498 					 msecs_to_jiffies(40000)))
2499 		status = -ETIMEDOUT;
2500 	else
2501 		status = adapter->flash_status;
2502 
2503 	return status;
2504 
2505 err_unlock:
2506 	spin_unlock_bh(&adapter->mcc_lock);
2507 	return status;
2508 }
2509 
2510 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2511 			 u16 img_optype, u32 img_offset, u32 crc_offset)
2512 {
2513 	struct be_cmd_read_flash_crc *req;
2514 	struct be_mcc_wrb *wrb;
2515 	int status;
2516 
2517 	spin_lock_bh(&adapter->mcc_lock);
2518 
2519 	wrb = wrb_from_mccq(adapter);
2520 	if (!wrb) {
2521 		status = -EBUSY;
2522 		goto err;
2523 	}
2524 	req = embedded_payload(wrb);
2525 
2526 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2527 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2528 			       wrb, NULL);
2529 
2530 	req->params.op_type = cpu_to_le32(img_optype);
2531 	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2532 		req->params.offset = cpu_to_le32(img_offset + crc_offset);
2533 	else
2534 		req->params.offset = cpu_to_le32(crc_offset);
2535 
2536 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2537 	req->params.data_buf_size = cpu_to_le32(0x4);
2538 
2539 	status = be_mcc_notify_wait(adapter);
2540 	if (!status)
2541 		memcpy(flashed_crc, req->crc, 4);
2542 
2543 err:
2544 	spin_unlock_bh(&adapter->mcc_lock);
2545 	return status;
2546 }
2547 
2548 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2549 			    struct be_dma_mem *nonemb_cmd)
2550 {
2551 	struct be_mcc_wrb *wrb;
2552 	struct be_cmd_req_acpi_wol_magic_config *req;
2553 	int status;
2554 
2555 	spin_lock_bh(&adapter->mcc_lock);
2556 
2557 	wrb = wrb_from_mccq(adapter);
2558 	if (!wrb) {
2559 		status = -EBUSY;
2560 		goto err;
2561 	}
2562 	req = nonemb_cmd->va;
2563 
2564 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2565 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2566 			       wrb, nonemb_cmd);
2567 	memcpy(req->magic_mac, mac, ETH_ALEN);
2568 
2569 	status = be_mcc_notify_wait(adapter);
2570 
2571 err:
2572 	spin_unlock_bh(&adapter->mcc_lock);
2573 	return status;
2574 }
2575 
2576 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2577 			u8 loopback_type, u8 enable)
2578 {
2579 	struct be_mcc_wrb *wrb;
2580 	struct be_cmd_req_set_lmode *req;
2581 	int status;
2582 
2583 	spin_lock_bh(&adapter->mcc_lock);
2584 
2585 	wrb = wrb_from_mccq(adapter);
2586 	if (!wrb) {
2587 		status = -EBUSY;
2588 		goto err;
2589 	}
2590 
2591 	req = embedded_payload(wrb);
2592 
2593 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2594 			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2595 			       wrb, NULL);
2596 
2597 	req->src_port = port_num;
2598 	req->dest_port = port_num;
2599 	req->loopback_type = loopback_type;
2600 	req->loopback_state = enable;
2601 
2602 	status = be_mcc_notify_wait(adapter);
2603 err:
2604 	spin_unlock_bh(&adapter->mcc_lock);
2605 	return status;
2606 }
2607 
2608 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2609 			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2610 			 u64 pattern)
2611 {
2612 	struct be_mcc_wrb *wrb;
2613 	struct be_cmd_req_loopback_test *req;
2614 	struct be_cmd_resp_loopback_test *resp;
2615 	int status;
2616 
2617 	spin_lock_bh(&adapter->mcc_lock);
2618 
2619 	wrb = wrb_from_mccq(adapter);
2620 	if (!wrb) {
2621 		status = -EBUSY;
2622 		goto err;
2623 	}
2624 
2625 	req = embedded_payload(wrb);
2626 
2627 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2628 			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2629 			       NULL);
2630 
2631 	req->hdr.timeout = cpu_to_le32(15);
2632 	req->pattern = cpu_to_le64(pattern);
2633 	req->src_port = cpu_to_le32(port_num);
2634 	req->dest_port = cpu_to_le32(port_num);
2635 	req->pkt_size = cpu_to_le32(pkt_size);
2636 	req->num_pkts = cpu_to_le32(num_pkts);
2637 	req->loopback_type = cpu_to_le32(loopback_type);
2638 
2639 	be_mcc_notify(adapter);
2640 
2641 	spin_unlock_bh(&adapter->mcc_lock);
2642 
2643 	wait_for_completion(&adapter->et_cmd_compl);
2644 	resp = embedded_payload(wrb);
2645 	status = le32_to_cpu(resp->status);
2646 
2647 	return status;
2648 err:
2649 	spin_unlock_bh(&adapter->mcc_lock);
2650 	return status;
2651 }
2652 
2653 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2654 			u32 byte_cnt, struct be_dma_mem *cmd)
2655 {
2656 	struct be_mcc_wrb *wrb;
2657 	struct be_cmd_req_ddrdma_test *req;
2658 	int status;
2659 	int i, j = 0;
2660 
2661 	spin_lock_bh(&adapter->mcc_lock);
2662 
2663 	wrb = wrb_from_mccq(adapter);
2664 	if (!wrb) {
2665 		status = -EBUSY;
2666 		goto err;
2667 	}
2668 	req = cmd->va;
2669 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2670 			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2671 			       cmd);
2672 
2673 	req->pattern = cpu_to_le64(pattern);
2674 	req->byte_count = cpu_to_le32(byte_cnt);
2675 	for (i = 0; i < byte_cnt; i++) {
2676 		req->snd_buff[i] = (u8)(pattern >> (j*8));
2677 		j++;
2678 		if (j > 7)
2679 			j = 0;
2680 	}
2681 
2682 	status = be_mcc_notify_wait(adapter);
2683 
2684 	if (!status) {
2685 		struct be_cmd_resp_ddrdma_test *resp;
2686 
2687 		resp = cmd->va;
2688 		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2689 		    resp->snd_err) {
2690 			status = -1;
2691 		}
2692 	}
2693 
2694 err:
2695 	spin_unlock_bh(&adapter->mcc_lock);
2696 	return status;
2697 }
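
/* The DDR DMA test fills the send buffer with the 64-bit pattern repeated
 * byte-wise, LSB first; e.g. pattern 0x1122334455667788 produces
 * 0x88 0x77 0x66 0x55 0x44 0x33 0x22 0x11 0x88 ... in snd_buff.
 */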
2698 
2699 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2700 			    struct be_dma_mem *nonemb_cmd)
2701 {
2702 	struct be_mcc_wrb *wrb;
2703 	struct be_cmd_req_seeprom_read *req;
2704 	int status;
2705 
2706 	spin_lock_bh(&adapter->mcc_lock);
2707 
2708 	wrb = wrb_from_mccq(adapter);
2709 	if (!wrb) {
2710 		status = -EBUSY;
2711 		goto err;
2712 	}
2713 	req = nonemb_cmd->va;
2714 
2715 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2716 			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2717 			       nonemb_cmd);
2718 
2719 	status = be_mcc_notify_wait(adapter);
2720 
2721 err:
2722 	spin_unlock_bh(&adapter->mcc_lock);
2723 	return status;
2724 }
2725 
2726 int be_cmd_get_phy_info(struct be_adapter *adapter)
2727 {
2728 	struct be_mcc_wrb *wrb;
2729 	struct be_cmd_req_get_phy_info *req;
2730 	struct be_dma_mem cmd;
2731 	int status;
2732 
2733 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2734 			    CMD_SUBSYSTEM_COMMON))
2735 		return -EPERM;
2736 
2737 	spin_lock_bh(&adapter->mcc_lock);
2738 
2739 	wrb = wrb_from_mccq(adapter);
2740 	if (!wrb) {
2741 		status = -EBUSY;
2742 		goto err;
2743 	}
2744 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2745 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2746 				     GFP_ATOMIC);
2747 	if (!cmd.va) {
2748 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2749 		status = -ENOMEM;
2750 		goto err;
2751 	}
2752 
2753 	req = cmd.va;
2754 
2755 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2756 			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2757 			       wrb, &cmd);
2758 
2759 	status = be_mcc_notify_wait(adapter);
2760 	if (!status) {
2761 		struct be_phy_info *resp_phy_info =
2762 				cmd.va + sizeof(struct be_cmd_req_hdr);
2763 
2764 		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2765 		adapter->phy.interface_type =
2766 			le16_to_cpu(resp_phy_info->interface_type);
2767 		adapter->phy.auto_speeds_supported =
2768 			le16_to_cpu(resp_phy_info->auto_speeds_supported);
2769 		adapter->phy.fixed_speeds_supported =
2770 			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2771 		adapter->phy.misc_params =
2772 			le32_to_cpu(resp_phy_info->misc_params);
2773 
2774 		if (BE2_chip(adapter)) {
2775 			adapter->phy.fixed_speeds_supported =
2776 				BE_SUPPORTED_SPEED_10GBPS |
2777 				BE_SUPPORTED_SPEED_1GBPS;
2778 		}
2779 	}
2780 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2781 err:
2782 	spin_unlock_bh(&adapter->mcc_lock);
2783 	return status;
2784 }
2785 
2786 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2787 {
2788 	struct be_mcc_wrb *wrb;
2789 	struct be_cmd_req_set_qos *req;
2790 	int status;
2791 
2792 	spin_lock_bh(&adapter->mcc_lock);
2793 
2794 	wrb = wrb_from_mccq(adapter);
2795 	if (!wrb) {
2796 		status = -EBUSY;
2797 		goto err;
2798 	}
2799 
2800 	req = embedded_payload(wrb);
2801 
2802 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2803 			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2804 
2805 	req->hdr.domain = domain;
2806 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2807 	req->max_bps_nic = cpu_to_le32(bps);
2808 
2809 	status = be_mcc_notify_wait(adapter);
2810 
2811 err:
2812 	spin_unlock_bh(&adapter->mcc_lock);
2813 	return status;
2814 }
2815 
2816 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2817 {
2818 	struct be_mcc_wrb *wrb;
2819 	struct be_cmd_req_cntl_attribs *req;
2820 	struct be_cmd_resp_cntl_attribs *resp;
2821 	int status;
2822 	int payload_len = max(sizeof(*req), sizeof(*resp));
2823 	struct mgmt_controller_attrib *attribs;
2824 	struct be_dma_mem attribs_cmd;
2825 
2826 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2827 		return -1;
2828 
2829 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2830 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2831 	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
2832 					     attribs_cmd.size,
2833 					     &attribs_cmd.dma, GFP_ATOMIC);
2834 	if (!attribs_cmd.va) {
2835 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2836 		status = -ENOMEM;
2837 		goto err;
2838 	}
2839 
2840 	wrb = wrb_from_mbox(adapter);
2841 	if (!wrb) {
2842 		status = -EBUSY;
2843 		goto err;
2844 	}
2845 	req = attribs_cmd.va;
2846 
2847 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2848 			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2849 			       wrb, &attribs_cmd);
2850 
2851 	status = be_mbox_notify_wait(adapter);
2852 	if (!status) {
2853 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2854 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
2855 	}
2856 
2857 err:
2858 	mutex_unlock(&adapter->mbox_lock);
2859 	if (attribs_cmd.va)
2860 		dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
2861 				  attribs_cmd.va, attribs_cmd.dma);
2862 	return status;
2863 }
2864 
2865 /* Uses mbox */
2866 int be_cmd_req_native_mode(struct be_adapter *adapter)
2867 {
2868 	struct be_mcc_wrb *wrb;
2869 	struct be_cmd_req_set_func_cap *req;
2870 	int status;
2871 
2872 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2873 		return -1;
2874 
2875 	wrb = wrb_from_mbox(adapter);
2876 	if (!wrb) {
2877 		status = -EBUSY;
2878 		goto err;
2879 	}
2880 
2881 	req = embedded_payload(wrb);
2882 
2883 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2884 			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2885 			       sizeof(*req), wrb, NULL);
2886 
2887 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2888 				CAPABILITY_BE3_NATIVE_ERX_API);
2889 	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2890 
2891 	status = be_mbox_notify_wait(adapter);
2892 	if (!status) {
2893 		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2894 
2895 		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2896 					CAPABILITY_BE3_NATIVE_ERX_API;
2897 		if (!adapter->be3_native)
2898 			dev_warn(&adapter->pdev->dev,
2899 				 "adapter not in advanced mode\n");
2900 	}
2901 err:
2902 	mutex_unlock(&adapter->mbox_lock);
2903 	return status;
2904 }
2905 
2906 /* Get privilege(s) for a function */
2907 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2908 			     u32 domain)
2909 {
2910 	struct be_mcc_wrb *wrb;
2911 	struct be_cmd_req_get_fn_privileges *req;
2912 	int status;
2913 
2914 	spin_lock_bh(&adapter->mcc_lock);
2915 
2916 	wrb = wrb_from_mccq(adapter);
2917 	if (!wrb) {
2918 		status = -EBUSY;
2919 		goto err;
2920 	}
2921 
2922 	req = embedded_payload(wrb);
2923 
2924 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2925 			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2926 			       wrb, NULL);
2927 
2928 	req->hdr.domain = domain;
2929 
2930 	status = be_mcc_notify_wait(adapter);
2931 	if (!status) {
2932 		struct be_cmd_resp_get_fn_privileges *resp =
2933 						embedded_payload(wrb);
2934 
2935 		*privilege = le32_to_cpu(resp->privilege_mask);
2936 
		/* In UMC mode the FW does not return the right privileges.
		 * Override with the correct privilege equivalent to a PF.
		 */
2940 		if (BEx_chip(adapter) && be_is_mc(adapter) &&
2941 		    be_physfn(adapter))
2942 			*privilege = MAX_PRIVILEGES;
2943 	}
2944 
2945 err:
2946 	spin_unlock_bh(&adapter->mcc_lock);
2947 	return status;
2948 }
2949 
2950 /* Set privilege(s) for a function */
2951 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2952 			     u32 domain)
2953 {
2954 	struct be_mcc_wrb *wrb;
2955 	struct be_cmd_req_set_fn_privileges *req;
2956 	int status;
2957 
2958 	spin_lock_bh(&adapter->mcc_lock);
2959 
2960 	wrb = wrb_from_mccq(adapter);
2961 	if (!wrb) {
2962 		status = -EBUSY;
2963 		goto err;
2964 	}
2965 
2966 	req = embedded_payload(wrb);
2967 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2968 			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2969 			       wrb, NULL);
2970 	req->hdr.domain = domain;
2971 	if (lancer_chip(adapter))
2972 		req->privileges_lancer = cpu_to_le32(privileges);
2973 	else
2974 		req->privileges = cpu_to_le32(privileges);
2975 
2976 	status = be_mcc_notify_wait(adapter);
2977 err:
2978 	spin_unlock_bh(&adapter->mcc_lock);
2979 	return status;
2980 }
2981 
/* pmac_id_valid: true  => pmac_id is supplied and the MAC address for that
 *			   pmac_id is requested.
 * pmac_id_valid: false => the MAC list is queried; if an active pmac_id is
 *			   found, it is returned and *pmac_id_valid is set to
 *			   true, else the first MAC address is returned.
 */
2986 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2987 			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
2988 			     u8 domain)
2989 {
2990 	struct be_mcc_wrb *wrb;
2991 	struct be_cmd_req_get_mac_list *req;
2992 	int status;
2993 	int mac_count;
2994 	struct be_dma_mem get_mac_list_cmd;
2995 	int i;
2996 
2997 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2998 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2999 	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3000 						  get_mac_list_cmd.size,
3001 						  &get_mac_list_cmd.dma,
3002 						  GFP_ATOMIC);
3003 
3004 	if (!get_mac_list_cmd.va) {
3005 		dev_err(&adapter->pdev->dev,
3006 			"Memory allocation failure during GET_MAC_LIST\n");
3007 		return -ENOMEM;
3008 	}
3009 
3010 	spin_lock_bh(&adapter->mcc_lock);
3011 
3012 	wrb = wrb_from_mccq(adapter);
3013 	if (!wrb) {
3014 		status = -EBUSY;
3015 		goto out;
3016 	}
3017 
3018 	req = get_mac_list_cmd.va;
3019 
3020 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3021 			       OPCODE_COMMON_GET_MAC_LIST,
3022 			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
3023 	req->hdr.domain = domain;
3024 	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
3025 	if (*pmac_id_valid) {
3026 		req->mac_id = cpu_to_le32(*pmac_id);
3027 		req->iface_id = cpu_to_le16(if_handle);
3028 		req->perm_override = 0;
3029 	} else {
3030 		req->perm_override = 1;
3031 	}
3032 
3033 	status = be_mcc_notify_wait(adapter);
3034 	if (!status) {
3035 		struct be_cmd_resp_get_mac_list *resp =
3036 						get_mac_list_cmd.va;
3037 
3038 		if (*pmac_id_valid) {
3039 			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3040 			       ETH_ALEN);
3041 			goto out;
3042 		}
3043 
3044 		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* The returned MAC list may contain one or more active
		 * mac_ids and/or one or more true or pseudo permanent MAC
		 * addresses. If an active mac_id is present, return the
		 * first one found.
		 */
3050 		for (i = 0; i < mac_count; i++) {
3051 			struct get_list_macaddr *mac_entry;
3052 			u16 mac_addr_size;
3053 			u32 mac_id;
3054 
3055 			mac_entry = &resp->macaddr_list[i];
3056 			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32-bit value and mac_addr size
			 * is 6 bytes
			 */
3060 			if (mac_addr_size == sizeof(u32)) {
3061 				*pmac_id_valid = true;
3062 				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3063 				*pmac_id = le32_to_cpu(mac_id);
3064 				goto out;
3065 			}
3066 		}
3067 		/* If no active mac_id found, return first mac addr */
3068 		*pmac_id_valid = false;
3069 		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3070 		       ETH_ALEN);
3071 	}
3072 
3073 out:
3074 	spin_unlock_bh(&adapter->mcc_lock);
3075 	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3076 			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
3077 	return status;
3078 }
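
/* Illustrative lookup of the currently active MAC (a sketch; the wrappers
 * below do essentially this): start with pmac_id_valid = false and let the
 * command report what it found:
 *
 *	bool active = false;
 *	u32 pmac_id;
 *	u8 mac[ETH_ALEN];
 *
 *	be_cmd_get_mac_from_list(adapter, mac, &active, &pmac_id,
 *				 adapter->if_handle, 0);
 *	// 'active' now says whether pmac_id (true) or mac (false) was filled
 */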
3079 
3080 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3081 			  u8 *mac, u32 if_handle, bool active, u32 domain)
3082 {
3083 	if (!active)
3084 		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3085 					 if_handle, domain);
3086 	if (BEx_chip(adapter))
3087 		return be_cmd_mac_addr_query(adapter, mac, false,
3088 					     if_handle, curr_pmac_id);
3089 	else
3090 		/* Fetch the MAC address using pmac_id */
3091 		return be_cmd_get_mac_from_list(adapter, mac, &active,
3092 						&curr_pmac_id,
3093 						if_handle, domain);
3094 }
3095 
3096 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3097 {
3098 	int status;
3099 	bool pmac_valid = false;
3100 
3101 	eth_zero_addr(mac);
3102 
3103 	if (BEx_chip(adapter)) {
3104 		if (be_physfn(adapter))
3105 			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3106 						       0);
3107 		else
3108 			status = be_cmd_mac_addr_query(adapter, mac, false,
3109 						       adapter->if_handle, 0);
3110 	} else {
3111 		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3112 						  NULL, adapter->if_handle, 0);
3113 	}
3114 
3115 	return status;
3116 }
3117 
3118 /* Uses synchronous MCCQ */
3119 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3120 			u8 mac_count, u32 domain)
3121 {
3122 	struct be_mcc_wrb *wrb;
3123 	struct be_cmd_req_set_mac_list *req;
3124 	int status;
3125 	struct be_dma_mem cmd;
3126 
3127 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3128 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3129 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3130 				     GFP_KERNEL);
3131 	if (!cmd.va)
3132 		return -ENOMEM;
3133 
3134 	spin_lock_bh(&adapter->mcc_lock);
3135 
3136 	wrb = wrb_from_mccq(adapter);
3137 	if (!wrb) {
3138 		status = -EBUSY;
3139 		goto err;
3140 	}
3141 
3142 	req = cmd.va;
3143 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3144 			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3145 			       wrb, &cmd);
3146 
3147 	req->hdr.domain = domain;
3148 	req->mac_count = mac_count;
3149 	if (mac_count)
3150 		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3151 
3152 	status = be_mcc_notify_wait(adapter);
3153 
3154 err:
3155 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3156 	spin_unlock_bh(&adapter->mcc_lock);
3157 	return status;
3158 }
3159 
/* Wrapper to delete any active MACs and provision the new MAC.
 * Changes to the MAC_LIST are allowed iff none of the MAC addresses in the
 * current list are active.
 */
3164 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3165 {
3166 	bool active_mac = false;
3167 	u8 old_mac[ETH_ALEN];
3168 	u32 pmac_id;
3169 	int status;
3170 
3171 	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3172 					  &pmac_id, if_id, dom);
3173 
3174 	if (!status && active_mac)
3175 		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3176 
3177 	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3178 }
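
/* For example, passing a NULL mac clears the provisioned list, since
 * be_cmd_set_mac_list() is then invoked with mac_count == 0:
 *
 *	be_cmd_set_mac(adapter, NULL, if_id, dom);
 */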
3179 
3180 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3181 			  u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
3182 {
3183 	struct be_mcc_wrb *wrb;
3184 	struct be_cmd_req_set_hsw_config *req;
3185 	void *ctxt;
3186 	int status;
3187 
3188 	spin_lock_bh(&adapter->mcc_lock);
3189 
3190 	wrb = wrb_from_mccq(adapter);
3191 	if (!wrb) {
3192 		status = -EBUSY;
3193 		goto err;
3194 	}
3195 
3196 	req = embedded_payload(wrb);
3197 	ctxt = &req->context;
3198 
3199 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3200 			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3201 			       NULL);
3202 
3203 	req->hdr.domain = domain;
3204 	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3205 	if (pvid) {
3206 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3207 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3208 	}
3209 	if (!BEx_chip(adapter) && hsw_mode) {
3210 		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3211 			      ctxt, adapter->hba_port_num);
3212 		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3213 		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3214 			      ctxt, hsw_mode);
3215 	}
3216 
	/* Enable/disable both MAC and VLAN spoof checking */
3218 	if (!BEx_chip(adapter) && spoofchk) {
3219 		AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
3220 			      ctxt, spoofchk);
3221 		AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
3222 			      ctxt, spoofchk);
3223 	}
3224 
3225 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3226 	status = be_mcc_notify_wait(adapter);
3227 
3228 err:
3229 	spin_unlock_bh(&adapter->mcc_lock);
3230 	return status;
3231 }
3232 
3233 /* Get Hyper switch config */
3234 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3235 			  u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
3236 {
3237 	struct be_mcc_wrb *wrb;
3238 	struct be_cmd_req_get_hsw_config *req;
3239 	void *ctxt;
3240 	int status;
3241 	u16 vid;
3242 
3243 	spin_lock_bh(&adapter->mcc_lock);
3244 
3245 	wrb = wrb_from_mccq(adapter);
3246 	if (!wrb) {
3247 		status = -EBUSY;
3248 		goto err;
3249 	}
3250 
3251 	req = embedded_payload(wrb);
3252 	ctxt = &req->context;
3253 
3254 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3255 			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3256 			       NULL);
3257 
3258 	req->hdr.domain = domain;
3259 	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3260 		      ctxt, intf_id);
3261 	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3262 
3263 	if (!BEx_chip(adapter) && mode) {
3264 		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3265 			      ctxt, adapter->hba_port_num);
3266 		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3267 	}
3268 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3269 
3270 	status = be_mcc_notify_wait(adapter);
3271 	if (!status) {
3272 		struct be_cmd_resp_get_hsw_config *resp =
3273 						embedded_payload(wrb);
3274 
3275 		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3276 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3277 				    pvid, &resp->context);
3278 		if (pvid)
3279 			*pvid = le16_to_cpu(vid);
3280 		if (mode)
3281 			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3282 					      port_fwd_type, &resp->context);
3283 		if (spoofchk)
3284 			*spoofchk =
3285 				AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3286 					      spoofchk, &resp->context);
3287 	}
3288 
3289 err:
3290 	spin_unlock_bh(&adapter->mcc_lock);
3291 	return status;
3292 }
3293 
3294 static bool be_is_wol_excluded(struct be_adapter *adapter)
3295 {
3296 	struct pci_dev *pdev = adapter->pdev;
3297 
3298 	if (be_virtfn(adapter))
3299 		return true;
3300 
3301 	switch (pdev->subsystem_device) {
3302 	case OC_SUBSYS_DEVICE_ID1:
3303 	case OC_SUBSYS_DEVICE_ID2:
3304 	case OC_SUBSYS_DEVICE_ID3:
3305 	case OC_SUBSYS_DEVICE_ID4:
3306 		return true;
3307 	default:
3308 		return false;
3309 	}
3310 }
3311 
3312 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3313 {
3314 	struct be_mcc_wrb *wrb;
3315 	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3316 	int status = 0;
3317 	struct be_dma_mem cmd;
3318 
3319 	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3320 			    CMD_SUBSYSTEM_ETH))
3321 		return -EPERM;
3322 
3323 	if (be_is_wol_excluded(adapter))
3324 		return status;
3325 
3326 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3327 		return -1;
3328 
3329 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3330 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3331 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3332 				     GFP_ATOMIC);
3333 	if (!cmd.va) {
3334 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3335 		status = -ENOMEM;
3336 		goto err;
3337 	}
3338 
3339 	wrb = wrb_from_mbox(adapter);
3340 	if (!wrb) {
3341 		status = -EBUSY;
3342 		goto err;
3343 	}
3344 
3345 	req = cmd.va;
3346 
3347 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3348 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3349 			       sizeof(*req), wrb, &cmd);
3350 
3351 	req->hdr.version = 1;
3352 	req->query_options = BE_GET_WOL_CAP;
3353 
3354 	status = be_mbox_notify_wait(adapter);
3355 	if (!status) {
3356 		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3357 
3358 		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
3359 
3360 		adapter->wol_cap = resp->wol_settings;
3361 		if (adapter->wol_cap & BE_WOL_CAP)
3362 			adapter->wol_en = true;
3363 	}
3364 err:
3365 	mutex_unlock(&adapter->mbox_lock);
3366 	if (cmd.va)
3367 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3368 				  cmd.dma);
	return status;
}
3372 
3373 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3374 {
3375 	struct be_dma_mem extfat_cmd;
3376 	struct be_fat_conf_params *cfgs;
3377 	int status;
3378 	int i, j;
3379 
3380 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3381 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3382 	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3383 					    extfat_cmd.size, &extfat_cmd.dma,
3384 					    GFP_ATOMIC);
3385 	if (!extfat_cmd.va)
3386 		return -ENOMEM;
3387 
3388 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3389 	if (status)
3390 		goto err;
3391 
3392 	cfgs = (struct be_fat_conf_params *)
3393 			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3394 	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3395 		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3396 
3397 		for (j = 0; j < num_modes; j++) {
3398 			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3399 				cfgs->module[i].trace_lvl[j].dbg_lvl =
3400 							cpu_to_le32(level);
3401 		}
3402 	}
3403 
3404 	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3405 err:
3406 	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3407 			  extfat_cmd.dma);
3408 	return status;
3409 }
3410 
3411 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3412 {
3413 	struct be_dma_mem extfat_cmd;
3414 	struct be_fat_conf_params *cfgs;
3415 	int status, j;
3416 	int level = 0;
3417 
3418 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3419 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3420 	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3421 					    extfat_cmd.size, &extfat_cmd.dma,
3422 					    GFP_ATOMIC);
3423 
3424 	if (!extfat_cmd.va) {
3425 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3426 			__func__);
3427 		goto err;
3428 	}
3429 
3430 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3431 	if (!status) {
3432 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3433 						sizeof(struct be_cmd_resp_hdr));
3434 
3435 		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3436 			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3437 				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3438 		}
3439 	}
3440 	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3441 			  extfat_cmd.dma);
3442 err:
3443 	return level;
3444 }
3445 
3446 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3447 				   struct be_dma_mem *cmd)
3448 {
3449 	struct be_mcc_wrb *wrb;
3450 	struct be_cmd_req_get_ext_fat_caps *req;
3451 	int status;
3452 
3453 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3454 		return -1;
3455 
3456 	wrb = wrb_from_mbox(adapter);
3457 	if (!wrb) {
3458 		status = -EBUSY;
3459 		goto err;
3460 	}
3461 
3462 	req = cmd->va;
3463 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3464 			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3465 			       cmd->size, wrb, cmd);
3466 	req->parameter_type = cpu_to_le32(1);
3467 
3468 	status = be_mbox_notify_wait(adapter);
3469 err:
3470 	mutex_unlock(&adapter->mbox_lock);
3471 	return status;
3472 }
3473 
3474 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3475 				   struct be_dma_mem *cmd,
3476 				   struct be_fat_conf_params *configs)
3477 {
3478 	struct be_mcc_wrb *wrb;
3479 	struct be_cmd_req_set_ext_fat_caps *req;
3480 	int status;
3481 
3482 	spin_lock_bh(&adapter->mcc_lock);
3483 
3484 	wrb = wrb_from_mccq(adapter);
3485 	if (!wrb) {
3486 		status = -EBUSY;
3487 		goto err;
3488 	}
3489 
3490 	req = cmd->va;
3491 	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3492 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3493 			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3494 			       cmd->size, wrb, cmd);
3495 
3496 	status = be_mcc_notify_wait(adapter);
3497 err:
3498 	spin_unlock_bh(&adapter->mcc_lock);
3499 	return status;
3500 }
3501 
3502 int be_cmd_query_port_name(struct be_adapter *adapter)
3503 {
3504 	struct be_cmd_req_get_port_name *req;
3505 	struct be_mcc_wrb *wrb;
3506 	int status;
3507 
3508 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3509 		return -1;
3510 
3511 	wrb = wrb_from_mbox(adapter);
3512 	req = embedded_payload(wrb);
3513 
3514 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3515 			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3516 			       NULL);
3517 	if (!BEx_chip(adapter))
3518 		req->hdr.version = 1;
3519 
3520 	status = be_mbox_notify_wait(adapter);
3521 	if (!status) {
3522 		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3523 
3524 		adapter->port_name = resp->port_name[adapter->hba_port_num];
3525 	} else {
3526 		adapter->port_name = adapter->hba_port_num + '0';
3527 	}
3528 
3529 	mutex_unlock(&adapter->mbox_lock);
3530 	return status;
3531 }
3532 
3533 /* Descriptor type */
3534 enum {
3535 	FUNC_DESC = 1,
3536 	VFT_DESC = 2
3537 };
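
/* The GET_FUNC_CONFIG/GET_PROFILE_CONFIG responses carry a packed array of
 * variable-length resource descriptors. The helpers below walk that array
 * by desc_len, treating a zero length reported by firmware as the v0 size
 * so the walk always advances.
 */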
3538 
3539 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3540 					       int desc_type)
3541 {
3542 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3543 	struct be_nic_res_desc *nic;
3544 	int i;
3545 
3546 	for (i = 0; i < desc_count; i++) {
3547 		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3548 		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3549 			nic = (struct be_nic_res_desc *)hdr;
3550 			if (desc_type == FUNC_DESC ||
3551 			    (desc_type == VFT_DESC &&
3552 			     nic->flags & (1 << VFT_SHIFT)))
3553 				return nic;
3554 		}
3555 
3556 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3557 		hdr = (void *)hdr + hdr->desc_len;
3558 	}
3559 	return NULL;
3560 }
3561 
3562 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3563 {
3564 	return be_get_nic_desc(buf, desc_count, VFT_DESC);
3565 }
3566 
3567 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3568 {
3569 	return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3570 }
3571 
3572 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3573 						 u32 desc_count)
3574 {
3575 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3576 	struct be_pcie_res_desc *pcie;
3577 	int i;
3578 
3579 	for (i = 0; i < desc_count; i++) {
3580 		if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3581 		     hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3582 			pcie = (struct be_pcie_res_desc	*)hdr;
3583 			if (pcie->pf_num == devfn)
3584 				return pcie;
3585 		}
3586 
3587 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3588 		hdr = (void *)hdr + hdr->desc_len;
3589 	}
3590 	return NULL;
3591 }
3592 
3593 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3594 {
3595 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3596 	int i;
3597 
3598 	for (i = 0; i < desc_count; i++) {
3599 		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3600 			return (struct be_port_res_desc *)hdr;
3601 
3602 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3603 		hdr = (void *)hdr + hdr->desc_len;
3604 	}
3605 	return NULL;
3606 }
3607 
3608 static void be_copy_nic_desc(struct be_resources *res,
3609 			     struct be_nic_res_desc *desc)
3610 {
3611 	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3612 	res->max_vlans = le16_to_cpu(desc->vlan_count);
3613 	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3614 	res->max_tx_qs = le16_to_cpu(desc->txq_count);
3615 	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3616 	res->max_rx_qs = le16_to_cpu(desc->rq_count);
3617 	res->max_evt_qs = le16_to_cpu(desc->eq_count);
3618 	res->max_cq_count = le16_to_cpu(desc->cq_count);
3619 	res->max_iface_count = le16_to_cpu(desc->iface_count);
3620 	res->max_mcc_count = le16_to_cpu(desc->mcc_count);
	/* Clear flags that the driver is not interested in */
3622 	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3623 				BE_IF_CAP_FLAGS_WANT;
3624 }
3625 
3626 /* Uses Mbox */
3627 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3628 {
3629 	struct be_mcc_wrb *wrb;
3630 	struct be_cmd_req_get_func_config *req;
3631 	int status;
3632 	struct be_dma_mem cmd;
3633 
3634 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3635 		return -1;
3636 
3637 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3638 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3639 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3640 				     GFP_ATOMIC);
3641 	if (!cmd.va) {
3642 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3643 		status = -ENOMEM;
3644 		goto err;
3645 	}
3646 
3647 	wrb = wrb_from_mbox(adapter);
3648 	if (!wrb) {
3649 		status = -EBUSY;
3650 		goto err;
3651 	}
3652 
3653 	req = cmd.va;
3654 
3655 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3656 			       OPCODE_COMMON_GET_FUNC_CONFIG,
3657 			       cmd.size, wrb, &cmd);
3658 
3659 	if (skyhawk_chip(adapter))
3660 		req->hdr.version = 1;
3661 
3662 	status = be_mbox_notify_wait(adapter);
3663 	if (!status) {
3664 		struct be_cmd_resp_get_func_config *resp = cmd.va;
3665 		u32 desc_count = le32_to_cpu(resp->desc_count);
3666 		struct be_nic_res_desc *desc;
3667 
3668 		desc = be_get_func_nic_desc(resp->func_param, desc_count);
3669 		if (!desc) {
3670 			status = -EINVAL;
3671 			goto err;
3672 		}
3673 
3674 		adapter->pf_number = desc->pf_num;
3675 		be_copy_nic_desc(res, desc);
3676 	}
3677 err:
3678 	mutex_unlock(&adapter->mbox_lock);
3679 	if (cmd.va)
3680 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3681 				  cmd.dma);
3682 	return status;
3683 }
3684 
3685 /* Will use MBOX only if MCCQ has not been created */
3686 int be_cmd_get_profile_config(struct be_adapter *adapter,
3687 			      struct be_resources *res, u8 query, u8 domain)
3688 {
3689 	struct be_cmd_resp_get_profile_config *resp;
3690 	struct be_cmd_req_get_profile_config *req;
3691 	struct be_nic_res_desc *vf_res;
3692 	struct be_pcie_res_desc *pcie;
3693 	struct be_port_res_desc *port;
3694 	struct be_nic_res_desc *nic;
3695 	struct be_mcc_wrb wrb = {0};
3696 	struct be_dma_mem cmd;
3697 	u16 desc_count;
3698 	int status;
3699 
3700 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3701 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3702 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3703 				     GFP_ATOMIC);
3704 	if (!cmd.va)
3705 		return -ENOMEM;
3706 
3707 	req = cmd.va;
3708 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3709 			       OPCODE_COMMON_GET_PROFILE_CONFIG,
3710 			       cmd.size, &wrb, &cmd);
3711 
3712 	req->hdr.domain = domain;
3713 	if (!lancer_chip(adapter))
3714 		req->hdr.version = 1;
3715 	req->type = ACTIVE_PROFILE_TYPE;
3716 
3717 	/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
3718 	 * descriptors with all bits set to "1" for the fields which can be
3719 	 * modified using SET_PROFILE_CONFIG cmd.
3720 	 */
3721 	if (query == RESOURCE_MODIFIABLE)
3722 		req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
3723 
3724 	status = be_cmd_notify_wait(adapter, &wrb);
3725 	if (status)
3726 		goto err;
3727 
3728 	resp = cmd.va;
3729 	desc_count = le16_to_cpu(resp->desc_count);
3730 
3731 	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3732 				desc_count);
3733 	if (pcie)
3734 		res->max_vfs = le16_to_cpu(pcie->num_vfs);
3735 
3736 	port = be_get_port_desc(resp->func_param, desc_count);
3737 	if (port)
3738 		adapter->mc_type = port->mc_type;
3739 
3740 	nic = be_get_func_nic_desc(resp->func_param, desc_count);
3741 	if (nic)
3742 		be_copy_nic_desc(res, nic);
3743 
3744 	vf_res = be_get_vft_desc(resp->func_param, desc_count);
3745 	if (vf_res)
3746 		res->vf_if_cap_flags = vf_res->cap_flags;
3747 err:
3748 	if (cmd.va)
3749 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3750 				  cmd.dma);
3751 	return status;
3752 }
3753 
3754 /* Will use MBOX only if MCCQ has not been created */
3755 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3756 				     int size, int count, u8 version, u8 domain)
3757 {
3758 	struct be_cmd_req_set_profile_config *req;
3759 	struct be_mcc_wrb wrb = {0};
3760 	struct be_dma_mem cmd;
3761 	int status;
3762 
3763 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3764 	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3765 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3766 				     GFP_ATOMIC);
3767 	if (!cmd.va)
3768 		return -ENOMEM;
3769 
3770 	req = cmd.va;
3771 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3772 			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
3773 			       &wrb, &cmd);
3774 	req->hdr.version = version;
3775 	req->hdr.domain = domain;
3776 	req->desc_count = cpu_to_le32(count);
3777 	memcpy(req->desc, desc, size);
3778 
3779 	status = be_cmd_notify_wait(adapter, &wrb);
3780 
3781 	if (cmd.va)
3782 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3783 				  cmd.dma);
3784 	return status;
3785 }
3786 
3787 /* Mark all fields invalid */
3788 static void be_reset_nic_desc(struct be_nic_res_desc *nic)
3789 {
3790 	memset(nic, 0, sizeof(*nic));
3791 	nic->unicast_mac_count = 0xFFFF;
3792 	nic->mcc_count = 0xFFFF;
3793 	nic->vlan_count = 0xFFFF;
3794 	nic->mcast_mac_count = 0xFFFF;
3795 	nic->txq_count = 0xFFFF;
3796 	nic->rq_count = 0xFFFF;
3797 	nic->rssq_count = 0xFFFF;
3798 	nic->lro_count = 0xFFFF;
3799 	nic->cq_count = 0xFFFF;
3800 	nic->toe_conn_count = 0xFFFF;
3801 	nic->eq_count = 0xFFFF;
3802 	nic->iface_count = 0xFFFF;
3803 	nic->link_param = 0xFF;
3804 	nic->channel_id_param = cpu_to_le16(0xF000);
3805 	nic->acpi_params = 0xFF;
3806 	nic->wol_param = 0x0F;
3807 	nic->tunnel_iface_count = 0xFFFF;
3808 	nic->direct_tenant_iface_count = 0xFFFF;
3809 	nic->bw_min = 0xFFFFFFFF;
3810 	nic->bw_max = 0xFFFFFFFF;
3811 }

/* Mark all fields invalid */
static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
{
	memset(pcie, 0, sizeof(*pcie));
	pcie->sriov_state = 0xFF;
	pcie->pf_state = 0xFF;
	pcie->pf_type = 0xFF;
	pcie->num_vfs = 0xFFFF;
}

int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
		      u8 domain)
{
	struct be_nic_res_desc nic_desc;
	u32 bw_percent;
	u16 version = 0;

	if (BE3_chip(adapter))
		return be_cmd_set_qos(adapter, max_rate / 10, domain);

	be_reset_nic_desc(&nic_desc);
	nic_desc.pf_num = adapter->pf_number;
	nic_desc.vf_num = domain;
	nic_desc.bw_min = 0;
	if (lancer_chip(adapter)) {
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
					(1 << NOSV_SHIFT);
		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
	} else {
		version = 1;
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
		nic_desc.bw_max = cpu_to_le32(bw_percent);
	}

	return be_cmd_set_profile_config(adapter, &nic_desc,
					 nic_desc.hdr.desc_len,
					 1, version, domain);
}
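
/* Worked example for the math above: capping a VF at max_rate = 2500 Mbps
 * on a 10000 Mbps link programs bw_max = (2500 * 100) / 10000 = 25 (a
 * percentage) via the v1 descriptor, while Lancer takes the rate directly
 * in units of 10 Mbps: bw_max = 2500 / 10 = 250.
 */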

static void be_fill_vf_res_template(struct be_adapter *adapter,
				    struct be_resources pool_res,
				    u16 num_vfs, u16 num_vf_qs,
				    struct be_nic_res_desc *nic_vft)
{
	u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
	struct be_resources res_mod = {0};

	/* Fetch the resource descriptors in which the GET_PROFILE_CONFIG cmd
	 * sets all bits to '1' for every field that is modifiable via the
	 * SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);

	/* If the RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set the RSS and DEFQ_RSS IFACE flags
	 * if more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair per VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}

		nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
	} else {
		num_vf_qs = 1;
	}

	nic_vft->rq_count = cpu_to_le16(num_vf_qs);
	nic_vft->txq_count = cpu_to_le16(num_vf_qs);
	nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
	nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
					(num_vfs + 1));

	/* Distribute the unicast MAC, VLAN, IFACE and MCCQ counts equally
	 * among the PF and its VFs, provided the fields are modifiable.
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
							 (num_vfs + 1));

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
						  (num_vfs + 1));

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
						   (num_vfs + 1));

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
						 (num_vfs + 1));
}
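
/* Worked example for the pooling above: with num_vfs = 7 and a pool of
 * max_vlans = 64, each of the 8 functions (the PF plus 7 VFs) gets
 * 64 / (7 + 1) = 8 VLAN filters; the MAC, IFACE and MCCQ counts divide
 * the same way.
 */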

int be_cmd_set_sriov_config(struct be_adapter *adapter,
			    struct be_resources pool_res, u16 num_vfs,
			    u16 num_vf_qs)
{
	struct {
		struct be_pcie_res_desc pcie;
		struct be_nic_res_desc nic_vft;
	} __packed desc;

	/* PF PCIE descriptor */
	be_reset_pcie_desc(&desc.pcie);
	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.pcie.pf_num = adapter->pdev->devfn;
	desc.pcie.sriov_state = num_vfs ? 1 : 0;
	desc.pcie.num_vfs = cpu_to_le16(num_vfs);

	/* VF NIC Template descriptor */
	be_reset_nic_desc(&desc.nic_vft);
	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.nic_vft.pf_num = adapter->pdev->devfn;
	desc.nic_vft.vf_num = 0;

	be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
				&desc.nic_vft);

	return be_cmd_set_profile_config(adapter, &desc,
					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
}
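
/* Usage sketch (illustrative only): provisioning 7 VFs with 2 queue pairs
 * each from the PF's resource pool.  Assumes the caller has the pool
 * resources at hand, e.g. from a prior be_cmd_get_profile_config() call;
 * the numbers are hypothetical.
 *
 *	be_cmd_set_sriov_config(adapter, pool_res, 7, 2);
 */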

int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_manage_iface_filters *req;
	int status;

	if (iface == 0xFFFFFFFF)
		return -EINVAL;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
			       wrb, NULL);
	req->op = op;
	req->target_iface_id = cpu_to_le32(iface);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
{
	struct be_port_res_desc port_desc;

	memset(&port_desc, 0, sizeof(port_desc));
	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
	port_desc.link_num = adapter->hba_port_num;
	if (port) {
		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
					(1 << RCVID_SHIFT);
		port_desc.nv_port = swab16(port);
	} else {
		port_desc.nv_flags = NV_TYPE_DISABLED;
		port_desc.nv_port = 0;
	}

	return be_cmd_set_profile_config(adapter, &port_desc,
					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
}
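
/* Usage sketch (illustrative only): offloading the IANA-assigned VXLAN UDP
 * port.  The argument is big-endian; swab16() above turns it into the
 * little-endian value the descriptor carries.
 *
 *	be_cmd_set_vxlan_port(adapter, cpu_to_be16(4789));	enable
 *	be_cmd_set_vxlan_port(adapter, 0);			disable
 */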

int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
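		/* The response is DMAed into the same embedded payload that
		 * carried the request, so just re-interpret the buffer.
		 */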
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

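	/* Poll the in-progress (INP) bit roughly once a second, for at most
	 * SLIPORT_IDLE_TIMEOUT seconds.
	 */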
	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -ETIMEDOUT;

	return status;
}

int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
{
	int status = 0;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);

	return status;
}

/* Check whether a FW dump image is present */
bool dump_present(struct be_adapter *adapter)
{
	u32 sliport_status = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
}

int lancer_initiate_dump(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (dump_present(adapter)) {
		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
		return -EEXIST;
	}

	/* Trigger a FW reset along with a diagnostic dump */
	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
				     PHYSDEV_CONTROL_DD_MASK);
	if (status < 0) {
		dev_err(dev, "FW reset failed\n");
		return status;
	}

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	if (!dump_present(adapter)) {
		dev_err(dev, "FW dump not generated\n");
		return -EIO;
	}

	return 0;
}
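
/* Typical dump lifecycle (an illustrative sketch, not driver code): force a
 * dump, retrieve it through the FW's file interface, then clear it so a
 * later dump can be initiated.
 *
 *	lancer_initiate_dump(adapter);
 *	... read the dump via the LANCER_FW_DUMP_FILE object ...
 *	lancer_delete_dump(adapter);
 */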

int lancer_delete_dump(struct be_adapter *adapter)
{
	int status;

	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
	return be_cmd_status(status);
}

/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (BEx_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_intr_set *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -EINTR;

	wrb = wrb_from_mbox(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
			       wrb, NULL);

	req->intr_enabled = intr_enable;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MBOX */
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
	struct be_cmd_req_get_active_profile *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -EINTR;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
			       wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_active_profile *resp =
							embedded_payload(wrb);

		*profile_id = le16_to_cpu(resp->active_profile_id);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
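
/* Usage sketch (illustrative only): fetch the profile currently active in
 * the FW, e.g. to decide whether resources must be re-provisioned.
 *
 *	u16 profile_id;
 *
 *	if (!be_cmd_get_active_profile(adapter, &profile_id))
 *		dev_info(&adapter->pdev->dev, "profile %d active\n",
 *			 profile_id);
 */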

int be_cmd_set_logical_link_config(struct be_adapter *adapter,
				   int link_state, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ll_link *req;
	int status;

	if (BEx_chip(adapter) || lancer_chip(adapter))
		return -EOPNOTSUPP;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
			       sizeof(*req), wrb, NULL);

	req->hdr.version = 1;
	req->hdr.domain = domain;

	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
		req->link_config |= 1;

	if (link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= 1 << PLINK_TRACK_SHIFT;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
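
/* Summary of the link_config encoding above (the DISABLE row is an
 * inference from the code leaving both bits clear):
 *
 *	IFLA_VF_LINK_STATE_ENABLE  -> bit 0 set (link forced up)
 *	IFLA_VF_LINK_STATE_AUTO    -> PLINK_TRACK bit set (follow the
 *				      physical link)
 *	IFLA_VF_LINK_STATE_DISABLE -> both bits clear (link forced down)
 */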

int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
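	/* The request and response share the wrb's embedded payload: the
	 * completion data lands in the same buffer that carried the request.
	 */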
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);