1 /*
2  * Copyright (C) 2005 - 2013 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 
22 static struct be_cmd_priv_map cmd_priv_map[] = {
23 	{
24 		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
25 		CMD_SUBSYSTEM_ETH,
26 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
28 	},
29 	{
30 		OPCODE_COMMON_GET_FLOW_CONTROL,
31 		CMD_SUBSYSTEM_COMMON,
32 		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
34 	},
35 	{
36 		OPCODE_COMMON_SET_FLOW_CONTROL,
37 		CMD_SUBSYSTEM_COMMON,
38 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
40 	},
41 	{
42 		OPCODE_ETH_GET_PPORT_STATS,
43 		CMD_SUBSYSTEM_ETH,
44 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
46 	},
47 	{
48 		OPCODE_COMMON_GET_PHY_DETAILS,
49 		CMD_SUBSYSTEM_COMMON,
50 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
52 	}
53 };
54 
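/* Returns false if the cmd is mapped in cmd_priv_map and the function's
 * current privileges don't include any of the privileges the cmd needs.
 */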
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
56 			   u8 subsystem)
57 {
58 	int i;
	int num_entries = ARRAY_SIZE(cmd_priv_map);
60 	u32 cmd_privileges = adapter->cmd_privileges;
61 
62 	for (i = 0; i < num_entries; i++)
63 		if (opcode == cmd_priv_map[i].opcode &&
64 		    subsystem == cmd_priv_map[i].subsystem)
65 			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
66 				return false;
67 
68 	return true;
69 }
70 
71 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
72 {
73 	return wrb->payload.embedded_payload;
74 }
75 
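/* Rings the MCCQ doorbell to make one newly-queued WRB visible to the FW.
 * The wmb() orders the WRB writes ahead of the doorbell write.
 */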
76 static void be_mcc_notify(struct be_adapter *adapter)
77 {
78 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
79 	u32 val = 0;
80 
81 	if (be_error(adapter))
82 		return;
83 
84 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
85 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
86 
87 	wmb();
88 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
89 }
90 
/* To check if the valid bit is set, check the entire word as we don't know
 * the endianness of the data (an old entry is host endian while a new entry
 * is little endian)
 */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
95 {
96 	u32 flags;
97 
98 	if (compl->flags != 0) {
99 		flags = le32_to_cpu(compl->flags);
100 		if (flags & CQE_FLAGS_VALID_MASK) {
101 			compl->flags = flags;
102 			return true;
103 		}
104 	}
105 	return false;
106 }
107 
108 /* Need to reset the entire word that houses the valid bit */
109 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
110 {
111 	compl->flags = 0;
112 }
113 
114 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
115 {
116 	unsigned long addr;
117 
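	/* tag1:tag0 carry the upper:lower 32 bits of the request's VA.
	 * Shift in two 16-bit steps; a single 32-bit shift would be
	 * undefined behaviour when unsigned long is only 32 bits wide.
	 */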
118 	addr = tag1;
119 	addr = ((addr << 16) << 16) | tag0;
120 	return (void *)addr;
121 }
122 
123 static int be_mcc_compl_process(struct be_adapter *adapter,
124 				struct be_mcc_compl *compl)
125 {
126 	u16 compl_status, extd_status;
127 	struct be_cmd_resp_hdr *resp_hdr;
128 	u8 opcode = 0, subsystem = 0;
129 
130 	/* Just swap the status to host endian; mcc tag is opaquely copied
131 	 * from mcc_wrb */
132 	be_dws_le_to_cpu(compl, 4);
133 
134 	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
135 				CQE_STATUS_COMPL_MASK;
136 
137 	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
138 
139 	if (resp_hdr) {
140 		opcode = resp_hdr->opcode;
141 		subsystem = resp_hdr->subsystem;
142 	}
143 
144 	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
145 	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
146 		complete(&adapter->et_cmd_compl);
147 		return 0;
148 	}
149 
150 	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
151 	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
152 	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
153 		adapter->flash_status = compl_status;
154 		complete(&adapter->et_cmd_compl);
155 	}
156 
157 	if (compl_status == MCC_STATUS_SUCCESS) {
158 		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
159 		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
160 		    (subsystem == CMD_SUBSYSTEM_ETH)) {
161 			be_parse_stats(adapter);
162 			adapter->stats_cmd_sent = false;
163 		}
164 		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
165 		    subsystem == CMD_SUBSYSTEM_COMMON) {
166 			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
167 				(void *)resp_hdr;
168 			adapter->drv_stats.be_on_die_temperature =
169 				resp->on_die_temperature;
170 		}
171 	} else {
172 		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
173 			adapter->be_get_temp_freq = 0;
174 
175 		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
176 			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
177 			goto done;
178 
179 		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
180 			dev_warn(&adapter->pdev->dev,
181 				 "VF is not privileged to issue opcode %d-%d\n",
182 				 opcode, subsystem);
183 		} else {
184 			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
185 					CQE_STATUS_EXTD_MASK;
186 			dev_err(&adapter->pdev->dev,
187 				"opcode %d-%d failed:status %d-%d\n",
188 				opcode, subsystem, compl_status, extd_status);
189 
190 			if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
191 				return extd_status;
192 		}
193 	}
194 done:
195 	return compl_status;
196 }
197 
198 /* Link state evt is a string of bytes; no need for endian swapping */
199 static void be_async_link_state_process(struct be_adapter *adapter,
200 		struct be_async_event_link_state *evt)
201 {
202 	/* When link status changes, link speed must be re-queried from FW */
203 	adapter->phy.link_speed = -1;
204 
205 	/* Ignore physical link event */
206 	if (lancer_chip(adapter) &&
207 	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
208 		return;
209 
210 	/* For the initial link status do not rely on the ASYNC event as
211 	 * it may not be received in some cases.
212 	 */
213 	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
214 		be_link_status_update(adapter, evt->port_link_status);
215 }
216 
217 /* Grp5 CoS Priority evt */
218 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
219 		struct be_async_event_grp5_cos_priority *evt)
220 {
221 	if (evt->valid) {
222 		adapter->vlan_prio_bmap = evt->available_priority_bmap;
223 		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio |=
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
226 	}
227 }
228 
229 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
230 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
231 		struct be_async_event_grp5_qos_link_speed *evt)
232 {
233 	if (adapter->phy.link_speed >= 0 &&
234 	    evt->physical_port == adapter->port_num)
235 		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
236 }
237 
/* Grp5 PVID evt */
239 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
240 		struct be_async_event_grp5_pvid_state *evt)
241 {
242 	if (evt->enabled)
243 		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
244 	else
245 		adapter->pvid = 0;
246 }
247 
248 static void be_async_grp5_evt_process(struct be_adapter *adapter,
249 		u32 trailer, struct be_mcc_compl *evt)
250 {
251 	u8 event_type = 0;
252 
253 	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
254 		ASYNC_TRAILER_EVENT_TYPE_MASK;
255 
256 	switch (event_type) {
257 	case ASYNC_EVENT_COS_PRIORITY:
258 		be_async_grp5_cos_priority_process(adapter,
259 		(struct be_async_event_grp5_cos_priority *)evt);
260 	break;
261 	case ASYNC_EVENT_QOS_SPEED:
262 		be_async_grp5_qos_speed_process(adapter,
263 		(struct be_async_event_grp5_qos_link_speed *)evt);
264 	break;
265 	case ASYNC_EVENT_PVID_STATE:
266 		be_async_grp5_pvid_state_process(adapter,
267 		(struct be_async_event_grp5_pvid_state *)evt);
268 	break;
269 	default:
270 		dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
271 			 event_type);
272 		break;
273 	}
274 }
275 
276 static void be_async_dbg_evt_process(struct be_adapter *adapter,
277 		u32 trailer, struct be_mcc_compl *cmp)
278 {
279 	u8 event_type = 0;
280 	struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
281 
282 	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
283 		ASYNC_TRAILER_EVENT_TYPE_MASK;
284 
285 	switch (event_type) {
286 	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
287 		if (evt->valid)
288 			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
289 		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
290 	break;
291 	default:
292 		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
293 			 event_type);
294 	break;
295 	}
296 }
297 
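/* The async event code lives in the trailer (flags) word of the CQE */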
298 static inline bool is_link_state_evt(u32 trailer)
299 {
300 	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
301 		ASYNC_TRAILER_EVENT_CODE_MASK) ==
302 				ASYNC_EVENT_CODE_LINK_STATE;
303 }
304 
305 static inline bool is_grp5_evt(u32 trailer)
306 {
307 	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
308 		ASYNC_TRAILER_EVENT_CODE_MASK) ==
309 				ASYNC_EVENT_CODE_GRP_5);
310 }
311 
312 static inline bool is_dbg_evt(u32 trailer)
313 {
314 	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
315 		ASYNC_TRAILER_EVENT_CODE_MASK) ==
316 				ASYNC_EVENT_CODE_QNQ);
317 }
318 
319 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
320 {
321 	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
322 	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
323 
324 	if (be_mcc_compl_is_new(compl)) {
325 		queue_tail_inc(mcc_cq);
326 		return compl;
327 	}
328 	return NULL;
329 }
330 
331 void be_async_mcc_enable(struct be_adapter *adapter)
332 {
333 	spin_lock_bh(&adapter->mcc_cq_lock);
334 
335 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
336 	adapter->mcc_obj.rearm_cq = true;
337 
338 	spin_unlock_bh(&adapter->mcc_cq_lock);
339 }
340 
341 void be_async_mcc_disable(struct be_adapter *adapter)
342 {
343 	spin_lock_bh(&adapter->mcc_cq_lock);
344 
345 	adapter->mcc_obj.rearm_cq = false;
346 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
347 
348 	spin_unlock_bh(&adapter->mcc_cq_lock);
349 }
350 
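/* Drain the MCC CQ: dispatch async events, process command completions,
 * then re-arm the CQ with the number of entries consumed.
 */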
351 int be_process_mcc(struct be_adapter *adapter)
352 {
353 	struct be_mcc_compl *compl;
354 	int num = 0, status = 0;
355 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
356 
357 	spin_lock(&adapter->mcc_cq_lock);
358 	while ((compl = be_mcc_compl_get(adapter))) {
359 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
360 			/* Interpret flags as an async trailer */
361 			if (is_link_state_evt(compl->flags))
362 				be_async_link_state_process(adapter,
363 				(struct be_async_event_link_state *) compl);
364 			else if (is_grp5_evt(compl->flags))
365 				be_async_grp5_evt_process(adapter,
366 				compl->flags, compl);
367 			else if (is_dbg_evt(compl->flags))
368 				be_async_dbg_evt_process(adapter,
369 				compl->flags, compl);
370 		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
373 		}
374 		be_mcc_compl_use(compl);
375 		num++;
376 	}
377 
378 	if (num)
379 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
380 
381 	spin_unlock(&adapter->mcc_cq_lock);
382 	return status;
383 }
384 
385 /* Wait till no more pending mcc requests are present */
386 static int be_mcc_wait_compl(struct be_adapter *adapter)
387 {
388 #define mcc_timeout		120000 /* 12s timeout */
389 	int i, status = 0;
390 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
391 
392 	for (i = 0; i < mcc_timeout; i++) {
393 		if (be_error(adapter))
394 			return -EIO;
395 
396 		local_bh_disable();
397 		status = be_process_mcc(adapter);
398 		local_bh_enable();
399 
400 		if (atomic_read(&mcc_obj->q.used) == 0)
401 			break;
402 		udelay(100);
403 	}
404 	if (i == mcc_timeout) {
405 		dev_err(&adapter->pdev->dev, "FW not responding\n");
406 		adapter->fw_timeout = true;
407 		return -EIO;
408 	}
409 	return status;
410 }
411 
412 /* Notify MCC requests and wait for completion */
413 static int be_mcc_notify_wait(struct be_adapter *adapter)
414 {
415 	int status;
416 	struct be_mcc_wrb *wrb;
417 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
418 	u16 index = mcc_obj->q.head;
419 	struct be_cmd_resp_hdr *resp;
420 
421 	index_dec(&index, mcc_obj->q.len);
422 	wrb = queue_index_node(&mcc_obj->q, index);
423 
424 	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
425 
426 	be_mcc_notify(adapter);
427 
428 	status = be_mcc_wait_compl(adapter);
429 	if (status == -EIO)
430 		goto out;
431 
432 	status = resp->status;
433 out:
434 	return status;
435 }
436 
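/* Poll the mbox doorbell's ready bit, giving up after ~4 seconds.
 * A read of all 1s typically means the device has dropped off the bus.
 */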
437 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
438 {
439 	int msecs = 0;
440 	u32 ready;
441 
442 	do {
443 		if (be_error(adapter))
444 			return -EIO;
445 
446 		ready = ioread32(db);
447 		if (ready == 0xffffffff)
448 			return -1;
449 
450 		ready &= MPU_MAILBOX_DB_RDY_MASK;
451 		if (ready)
452 			break;
453 
454 		if (msecs > 4000) {
455 			dev_err(&adapter->pdev->dev, "FW not responding\n");
456 			adapter->fw_timeout = true;
457 			be_detect_error(adapter);
458 			return -1;
459 		}
460 
461 		msleep(1);
462 		msecs++;
463 	} while (true);
464 
465 	return 0;
466 }
467 
/*
 * Insert the mailbox address into the doorbell in two steps and poll on the
 * mbox doorbell till a command completion (or a timeout) occurs.
 */
472 static int be_mbox_notify_wait(struct be_adapter *adapter)
473 {
474 	int status;
475 	u32 val = 0;
476 	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
477 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
478 	struct be_mcc_mailbox *mbox = mbox_mem->va;
479 	struct be_mcc_compl *compl = &mbox->compl;
480 
481 	/* wait for ready to be set */
482 	status = be_mbox_db_ready_wait(adapter, db);
483 	if (status != 0)
484 		return status;
485 
486 	val |= MPU_MAILBOX_DB_HI_MASK;
487 	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
488 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
489 	iowrite32(val, db);
490 
491 	/* wait for ready to be set */
492 	status = be_mbox_db_ready_wait(adapter, db);
493 	if (status != 0)
494 		return status;
495 
496 	val = 0;
497 	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
498 	val |= (u32)(mbox_mem->dma >> 4) << 2;
499 	iowrite32(val, db);
500 
501 	status = be_mbox_db_ready_wait(adapter, db);
502 	if (status != 0)
503 		return status;
504 
505 	/* A cq entry has been made now */
506 	if (be_mcc_compl_is_new(compl)) {
507 		status = be_mcc_compl_process(adapter, &mbox->compl);
508 		be_mcc_compl_use(compl);
509 		if (status)
510 			return status;
511 	} else {
512 		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
513 		return -1;
514 	}
515 	return 0;
516 }
517 
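/* The POST stage is read from the SLIPORT semaphore register: it lives in
 * CSR space on BEx chips and in PCI config space on later chips.
 */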
518 static u16 be_POST_stage_get(struct be_adapter *adapter)
519 {
520 	u32 sem;
521 
522 	if (BEx_chip(adapter))
523 		sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
524 	else
525 		pci_read_config_dword(adapter->pdev,
526 				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
527 
528 	return sem & POST_STAGE_MASK;
529 }
530 
531 static int lancer_wait_ready(struct be_adapter *adapter)
532 {
533 #define SLIPORT_READY_TIMEOUT 30
534 	u32 sliport_status;
535 	int status = 0, i;
536 
537 	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
538 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
539 		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
540 			break;
541 
542 		msleep(1000);
543 	}
544 
545 	if (i == SLIPORT_READY_TIMEOUT)
546 		status = -1;
547 
548 	return status;
549 }
550 
551 static bool lancer_provisioning_error(struct be_adapter *adapter)
552 {
553 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
554 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
555 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
556 		sliport_err1 = ioread32(adapter->db +
557 					SLIPORT_ERROR1_OFFSET);
558 		sliport_err2 = ioread32(adapter->db +
559 					SLIPORT_ERROR2_OFFSET);
560 
561 		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
562 		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
563 			return true;
564 	}
565 	return false;
566 }
567 
568 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
569 {
570 	int status;
571 	u32 sliport_status, err, reset_needed;
572 	bool resource_error;
573 
574 	resource_error = lancer_provisioning_error(adapter);
575 	if (resource_error)
576 		return -EAGAIN;
577 
578 	status = lancer_wait_ready(adapter);
579 	if (!status) {
580 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
581 		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
582 		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
583 		if (err && reset_needed) {
584 			iowrite32(SLI_PORT_CONTROL_IP_MASK,
585 				  adapter->db + SLIPORT_CONTROL_OFFSET);
586 
587 			/* check adapter has corrected the error */
588 			status = lancer_wait_ready(adapter);
589 			sliport_status = ioread32(adapter->db +
590 						  SLIPORT_STATUS_OFFSET);
591 			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
592 						SLIPORT_STATUS_RN_MASK);
593 			if (status || sliport_status)
594 				status = -1;
595 		} else if (err || reset_needed) {
596 			status = -1;
597 		}
598 	}
	/* Stop error recovery if the error is not recoverable.
	 * A no-resource error is temporary and will go away once the PF
	 * provisions resources.
	 */
603 	resource_error = lancer_provisioning_error(adapter);
604 	if (resource_error)
605 		status = -EAGAIN;
606 
607 	return status;
608 }
609 
610 int be_fw_wait_ready(struct be_adapter *adapter)
611 {
612 	u16 stage;
613 	int status, timeout = 0;
614 	struct device *dev = &adapter->pdev->dev;
615 
616 	if (lancer_chip(adapter)) {
617 		status = lancer_wait_ready(adapter);
618 		return status;
619 	}
620 
621 	do {
622 		stage = be_POST_stage_get(adapter);
623 		if (stage == POST_STAGE_ARMFW_RDY)
624 			return 0;
625 
626 		dev_info(dev, "Waiting for POST, %ds elapsed\n",
627 			 timeout);
628 		if (msleep_interruptible(2000)) {
629 			dev_err(dev, "Waiting for POST aborted\n");
630 			return -EINTR;
631 		}
632 		timeout += 2;
633 	} while (timeout < 60);
634 
635 	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
636 	return -1;
637 }
638 
639 
640 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
641 {
642 	return &wrb->payload.sgl[0];
643 }
644 
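/* Stash the request's VA in the WRB's tags so be_decode_resp_hdr() can
 * locate the response buffer when the completion arrives.
 */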
645 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
646 				 unsigned long addr)
647 {
648 	wrb->tag0 = addr & 0xFFFFFFFF;
649 	wrb->tag1 = upper_32_bits(addr);
650 }
651 
/* Don't touch the hdr after it's prepared.
 * mem will be NULL for embedded commands.
 */
654 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
655 				u8 subsystem, u8 opcode, int cmd_len,
656 				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
657 {
658 	struct be_sge *sge;
659 
660 	req_hdr->opcode = opcode;
661 	req_hdr->subsystem = subsystem;
662 	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
663 	req_hdr->version = 0;
664 	fill_wrb_tags(wrb, (ulong) req_hdr);
665 	wrb->payload_length = cmd_len;
666 	if (mem) {
667 		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
668 			MCC_WRB_SGE_CNT_SHIFT;
669 		sge = nonembedded_sgl(wrb);
670 		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
671 		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
672 		sge->len = cpu_to_le32(mem->size);
673 	} else
674 		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
675 	be_dws_cpu_to_le(wrb, 8);
676 }
677 
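/* Fill the pages array with the 4K pages spanned by the DMA buffer,
 * capped at max_pages entries.
 */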
678 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
679 			struct be_dma_mem *mem)
680 {
681 	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
682 	u64 dma = (u64)mem->dma;
683 
684 	for (i = 0; i < buf_pages; i++) {
685 		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
686 		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
687 		dma += PAGE_SIZE_4K;
688 	}
689 }
690 
691 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
692 {
693 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
694 	struct be_mcc_wrb *wrb
695 		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
696 	memset(wrb, 0, sizeof(*wrb));
697 	return wrb;
698 }
699 
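/* Returns a zeroed WRB slot from the MCCQ, or NULL if the queue is full
 * or not yet created; callers must hold mcc_lock.
 */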
700 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
701 {
702 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
703 	struct be_mcc_wrb *wrb;
704 
705 	if (!mccq->created)
706 		return NULL;
707 
708 	if (atomic_read(&mccq->used) >= mccq->len)
709 		return NULL;
710 
711 	wrb = queue_head_node(mccq);
712 	queue_head_inc(mccq);
713 	atomic_inc(&mccq->used);
714 	memset(wrb, 0, sizeof(*wrb));
715 	return wrb;
716 }
717 
718 static bool use_mcc(struct be_adapter *adapter)
719 {
720 	return adapter->mcc_obj.q.created;
721 }
722 
723 /* Must be used only in process context */
724 static int be_cmd_lock(struct be_adapter *adapter)
725 {
726 	if (use_mcc(adapter)) {
727 		spin_lock_bh(&adapter->mcc_lock);
728 		return 0;
729 	} else {
730 		return mutex_lock_interruptible(&adapter->mbox_lock);
731 	}
732 }
733 
734 /* Must be used only in process context */
735 static void be_cmd_unlock(struct be_adapter *adapter)
736 {
737 	if (use_mcc(adapter))
738 		spin_unlock_bh(&adapter->mcc_lock);
	else
		mutex_unlock(&adapter->mbox_lock);
741 }
742 
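/* Copy a caller-built WRB into a real mbox/MCCQ slot. An embedded payload
 * moves along with the WRB, so the tags are refreshed to point at the new
 * payload location.
 */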
743 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
744 				      struct be_mcc_wrb *wrb)
745 {
746 	struct be_mcc_wrb *dest_wrb;
747 
748 	if (use_mcc(adapter)) {
749 		dest_wrb = wrb_from_mccq(adapter);
750 		if (!dest_wrb)
751 			return NULL;
752 	} else {
753 		dest_wrb = wrb_from_mbox(adapter);
754 	}
755 
756 	memcpy(dest_wrb, wrb, sizeof(*wrb));
757 	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
758 		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
759 
760 	return dest_wrb;
761 }
762 
763 /* Must be used only in process context */
764 static int be_cmd_notify_wait(struct be_adapter *adapter,
765 			      struct be_mcc_wrb *wrb)
766 {
767 	struct be_mcc_wrb *dest_wrb;
768 	int status;
769 
770 	status = be_cmd_lock(adapter);
771 	if (status)
772 		return status;
773 
	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		/* release the lock taken by be_cmd_lock() above */
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
787 	return status;
788 }
789 
790 /* Tell fw we're about to start firing cmds by writing a
791  * special pattern across the wrb hdr; uses mbox
792  */
793 int be_cmd_fw_init(struct be_adapter *adapter)
794 {
795 	u8 *wrb;
796 	int status;
797 
798 	if (lancer_chip(adapter))
799 		return 0;
800 
801 	if (mutex_lock_interruptible(&adapter->mbox_lock))
802 		return -1;
803 
804 	wrb = (u8 *)wrb_from_mbox(adapter);
805 	*wrb++ = 0xFF;
806 	*wrb++ = 0x12;
807 	*wrb++ = 0x34;
808 	*wrb++ = 0xFF;
809 	*wrb++ = 0xFF;
810 	*wrb++ = 0x56;
811 	*wrb++ = 0x78;
812 	*wrb = 0xFF;
813 
814 	status = be_mbox_notify_wait(adapter);
815 
816 	mutex_unlock(&adapter->mbox_lock);
817 	return status;
818 }
819 
820 /* Tell fw we're done with firing cmds by writing a
821  * special pattern across the wrb hdr; uses mbox
822  */
823 int be_cmd_fw_clean(struct be_adapter *adapter)
824 {
825 	u8 *wrb;
826 	int status;
827 
828 	if (lancer_chip(adapter))
829 		return 0;
830 
831 	if (mutex_lock_interruptible(&adapter->mbox_lock))
832 		return -1;
833 
834 	wrb = (u8 *)wrb_from_mbox(adapter);
835 	*wrb++ = 0xFF;
836 	*wrb++ = 0xAA;
837 	*wrb++ = 0xBB;
838 	*wrb++ = 0xFF;
839 	*wrb++ = 0xFF;
840 	*wrb++ = 0xCC;
841 	*wrb++ = 0xDD;
842 	*wrb = 0xFF;
843 
844 	status = be_mbox_notify_wait(adapter);
845 
846 	mutex_unlock(&adapter->mbox_lock);
847 	return status;
848 }
849 
850 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
851 {
852 	struct be_mcc_wrb *wrb;
853 	struct be_cmd_req_eq_create *req;
854 	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
855 	int status, ver = 0;
856 
857 	if (mutex_lock_interruptible(&adapter->mbox_lock))
858 		return -1;
859 
860 	wrb = wrb_from_mbox(adapter);
861 	req = embedded_payload(wrb);
862 
863 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
864 		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
865 
	/* Support for EQ_CREATE version 2 is available only from SH-R onwards */
867 	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
868 		ver = 2;
869 
870 	req->hdr.version = ver;
871 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
872 
873 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4-byte EQE */
875 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
876 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
877 		      __ilog2_u32(eqo->q.len / 256));
878 	be_dws_cpu_to_le(req->context, sizeof(req->context));
879 
880 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
881 
882 	status = be_mbox_notify_wait(adapter);
883 	if (!status) {
884 		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
885 		eqo->q.id = le16_to_cpu(resp->eq_id);
886 		eqo->msix_idx =
887 			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
888 		eqo->q.created = true;
889 	}
890 
891 	mutex_unlock(&adapter->mbox_lock);
892 	return status;
893 }
894 
895 /* Use MCC */
896 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
897 			  bool permanent, u32 if_handle, u32 pmac_id)
898 {
899 	struct be_mcc_wrb *wrb;
900 	struct be_cmd_req_mac_query *req;
901 	int status;
902 
903 	spin_lock_bh(&adapter->mcc_lock);
904 
905 	wrb = wrb_from_mccq(adapter);
906 	if (!wrb) {
907 		status = -EBUSY;
908 		goto err;
909 	}
910 	req = embedded_payload(wrb);
911 
912 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
913 		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
914 	req->type = MAC_ADDRESS_TYPE_NETWORK;
915 	if (permanent) {
916 		req->permanent = 1;
917 	} else {
918 		req->if_id = cpu_to_le16((u16) if_handle);
919 		req->pmac_id = cpu_to_le32(pmac_id);
920 		req->permanent = 0;
921 	}
922 
923 	status = be_mcc_notify_wait(adapter);
924 	if (!status) {
925 		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
926 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
927 	}
928 
929 err:
930 	spin_unlock_bh(&adapter->mcc_lock);
931 	return status;
932 }
933 
934 /* Uses synchronous MCCQ */
935 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
936 		u32 if_id, u32 *pmac_id, u32 domain)
937 {
938 	struct be_mcc_wrb *wrb;
939 	struct be_cmd_req_pmac_add *req;
940 	int status;
941 
942 	spin_lock_bh(&adapter->mcc_lock);
943 
944 	wrb = wrb_from_mccq(adapter);
945 	if (!wrb) {
946 		status = -EBUSY;
947 		goto err;
948 	}
949 	req = embedded_payload(wrb);
950 
951 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
952 		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
953 
954 	req->hdr.domain = domain;
955 	req->if_id = cpu_to_le32(if_id);
956 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
957 
958 	status = be_mcc_notify_wait(adapter);
959 	if (!status) {
960 		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
961 		*pmac_id = le32_to_cpu(resp->pmac_id);
962 	}
963 
964 err:
965 	spin_unlock_bh(&adapter->mcc_lock);
966 
	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;
969 
970 	return status;
971 }
972 
973 /* Uses synchronous MCCQ */
974 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
975 {
976 	struct be_mcc_wrb *wrb;
977 	struct be_cmd_req_pmac_del *req;
978 	int status;
979 
980 	if (pmac_id == -1)
981 		return 0;
982 
983 	spin_lock_bh(&adapter->mcc_lock);
984 
985 	wrb = wrb_from_mccq(adapter);
986 	if (!wrb) {
987 		status = -EBUSY;
988 		goto err;
989 	}
990 	req = embedded_payload(wrb);
991 
992 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
993 		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
994 
995 	req->hdr.domain = dom;
996 	req->if_id = cpu_to_le32(if_id);
997 	req->pmac_id = cpu_to_le32(pmac_id);
998 
999 	status = be_mcc_notify_wait(adapter);
1000 
1001 err:
1002 	spin_unlock_bh(&adapter->mcc_lock);
1003 	return status;
1004 }
1005 
1006 /* Uses Mbox */
1007 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1008 		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1009 {
1010 	struct be_mcc_wrb *wrb;
1011 	struct be_cmd_req_cq_create *req;
1012 	struct be_dma_mem *q_mem = &cq->dma_mem;
1013 	void *ctxt;
1014 	int status;
1015 
1016 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1017 		return -1;
1018 
1019 	wrb = wrb_from_mbox(adapter);
1020 	req = embedded_payload(wrb);
1021 	ctxt = &req->context;
1022 
1023 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1024 		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
1025 
1026 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1027 
1028 	if (BEx_chip(adapter)) {
1029 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1030 								coalesce_wm);
1031 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1032 								ctxt, no_delay);
1033 		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1034 						__ilog2_u32(cq->len/256));
1035 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1036 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1037 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1038 	} else {
1039 		req->hdr.version = 2;
1040 		req->page_size = 1; /* 1 for 4K */
1041 
1042 		/* coalesce-wm field in this cmd is not relevant to Lancer.
1043 		 * Lancer uses COMMON_MODIFY_CQ to set this field
1044 		 */
1045 		if (!lancer_chip(adapter))
1046 			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1047 				      ctxt, coalesce_wm);
1048 		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1049 								no_delay);
1050 		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1051 						__ilog2_u32(cq->len/256));
1052 		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1053 		AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
1054 								ctxt, 1);
1055 		AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
1056 								ctxt, eq->id);
1057 	}
1058 
1059 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1060 
1061 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1062 
1063 	status = be_mbox_notify_wait(adapter);
1064 	if (!status) {
1065 		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1066 		cq->id = le16_to_cpu(resp->cq_id);
1067 		cq->created = true;
1068 	}
1069 
1070 	mutex_unlock(&adapter->mbox_lock);
1071 
1072 	return status;
1073 }
1074 
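/* Queue lengths are powers of 2, so fls() yields the log2(len) + 1
 * encoding the hw expects; the max encoding of 16 wraps back to 0.
 */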
1075 static u32 be_encoded_q_len(int q_len)
1076 {
1077 	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1078 	if (len_encoded == 16)
1079 		len_encoded = 0;
1080 	return len_encoded;
1081 }
1082 
1083 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1084 				struct be_queue_info *mccq,
1085 				struct be_queue_info *cq)
1086 {
1087 	struct be_mcc_wrb *wrb;
1088 	struct be_cmd_req_mcc_ext_create *req;
1089 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1090 	void *ctxt;
1091 	int status;
1092 
1093 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1094 		return -1;
1095 
1096 	wrb = wrb_from_mbox(adapter);
1097 	req = embedded_payload(wrb);
1098 	ctxt = &req->context;
1099 
1100 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1101 			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
1102 
1103 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1104 	if (lancer_chip(adapter)) {
1105 		req->hdr.version = 1;
1106 		req->cq_id = cpu_to_le16(cq->id);
1107 
1108 		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
1109 						be_encoded_q_len(mccq->len));
1110 		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
1111 		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
1112 								ctxt, cq->id);
1113 		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
1114 								 ctxt, 1);
1115 
1116 	} else {
1117 		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1118 		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1119 						be_encoded_q_len(mccq->len));
1120 		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1121 	}
1122 
	/* Subscribe to Link State and Group 5 events (bits 1 and 5 set) */
1124 	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1125 	req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
1126 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1127 
1128 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1129 
1130 	status = be_mbox_notify_wait(adapter);
1131 	if (!status) {
1132 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1133 		mccq->id = le16_to_cpu(resp->id);
1134 		mccq->created = true;
1135 	}
1136 	mutex_unlock(&adapter->mbox_lock);
1137 
1138 	return status;
1139 }
1140 
1141 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1142 				struct be_queue_info *mccq,
1143 				struct be_queue_info *cq)
1144 {
1145 	struct be_mcc_wrb *wrb;
1146 	struct be_cmd_req_mcc_create *req;
1147 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1148 	void *ctxt;
1149 	int status;
1150 
1151 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1152 		return -1;
1153 
1154 	wrb = wrb_from_mbox(adapter);
1155 	req = embedded_payload(wrb);
1156 	ctxt = &req->context;
1157 
1158 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1159 			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
1160 
1161 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1162 
1163 	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1164 	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1165 			be_encoded_q_len(mccq->len));
1166 	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1167 
1168 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1169 
1170 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1171 
1172 	status = be_mbox_notify_wait(adapter);
1173 	if (!status) {
1174 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1175 		mccq->id = le16_to_cpu(resp->id);
1176 		mccq->created = true;
1177 	}
1178 
1179 	mutex_unlock(&adapter->mbox_lock);
1180 	return status;
1181 }
1182 
1183 int be_cmd_mccq_create(struct be_adapter *adapter,
1184 			struct be_queue_info *mccq,
1185 			struct be_queue_info *cq)
1186 {
1187 	int status;
1188 
1189 	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1190 	if (status && !lancer_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic\n");
1194 		status = be_cmd_mccq_org_create(adapter, mccq, cq);
1195 	}
1196 	return status;
1197 }
1198 
1199 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1200 {
1201 	struct be_mcc_wrb wrb = {0};
1202 	struct be_cmd_req_eth_tx_create *req;
1203 	struct be_queue_info *txq = &txo->q;
1204 	struct be_queue_info *cq = &txo->cq;
1205 	struct be_dma_mem *q_mem = &txq->dma_mem;
1206 	int status, ver = 0;
1207 
1208 	req = embedded_payload(&wrb);
1209 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1210 				OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1211 
1212 	if (lancer_chip(adapter)) {
1213 		req->hdr.version = 1;
1214 	} else if (BEx_chip(adapter)) {
1215 		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1216 			req->hdr.version = 2;
1217 	} else { /* For SH */
1218 		req->hdr.version = 2;
1219 	}
1220 
1221 	if (req->hdr.version > 0)
1222 		req->if_id = cpu_to_le16(adapter->if_handle);
1223 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1224 	req->ulp_num = BE_ULP1_NUM;
1225 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1226 	req->cq_id = cpu_to_le16(cq->id);
1227 	req->queue_size = be_encoded_q_len(txq->len);
1228 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1229 	ver = req->hdr.version;
1230 
1231 	status = be_cmd_notify_wait(adapter, &wrb);
1232 	if (!status) {
1233 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1234 		txq->id = le16_to_cpu(resp->cid);
1235 		if (ver == 2)
1236 			txo->db_offset = le32_to_cpu(resp->db_offset);
1237 		else
1238 			txo->db_offset = DB_TXULP1_OFFSET;
1239 		txq->created = true;
1240 	}
1241 
1242 	return status;
1243 }
1244 
1245 /* Uses MCC */
1246 int be_cmd_rxq_create(struct be_adapter *adapter,
1247 		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1248 		u32 if_id, u32 rss, u8 *rss_id)
1249 {
1250 	struct be_mcc_wrb *wrb;
1251 	struct be_cmd_req_eth_rx_create *req;
1252 	struct be_dma_mem *q_mem = &rxq->dma_mem;
1253 	int status;
1254 
1255 	spin_lock_bh(&adapter->mcc_lock);
1256 
1257 	wrb = wrb_from_mccq(adapter);
1258 	if (!wrb) {
1259 		status = -EBUSY;
1260 		goto err;
1261 	}
1262 	req = embedded_payload(wrb);
1263 
1264 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1265 				OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1266 
1267 	req->cq_id = cpu_to_le16(cq_id);
1268 	req->frag_size = fls(frag_size) - 1;
1269 	req->num_pages = 2;
1270 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1271 	req->interface_id = cpu_to_le32(if_id);
1272 	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1273 	req->rss_queue = cpu_to_le32(rss);
1274 
1275 	status = be_mcc_notify_wait(adapter);
1276 	if (!status) {
1277 		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1278 		rxq->id = le16_to_cpu(resp->id);
1279 		rxq->created = true;
1280 		*rss_id = resp->rss_id;
1281 	}
1282 
1283 err:
1284 	spin_unlock_bh(&adapter->mcc_lock);
1285 	return status;
1286 }
1287 
1288 /* Generic destroyer function for all types of queues
1289  * Uses Mbox
1290  */
1291 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1292 		int queue_type)
1293 {
1294 	struct be_mcc_wrb *wrb;
1295 	struct be_cmd_req_q_destroy *req;
1296 	u8 subsys = 0, opcode = 0;
1297 	int status;
1298 
1299 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1300 		return -1;
1301 
1302 	wrb = wrb_from_mbox(adapter);
1303 	req = embedded_payload(wrb);
1304 
1305 	switch (queue_type) {
1306 	case QTYPE_EQ:
1307 		subsys = CMD_SUBSYSTEM_COMMON;
1308 		opcode = OPCODE_COMMON_EQ_DESTROY;
1309 		break;
1310 	case QTYPE_CQ:
1311 		subsys = CMD_SUBSYSTEM_COMMON;
1312 		opcode = OPCODE_COMMON_CQ_DESTROY;
1313 		break;
1314 	case QTYPE_TXQ:
1315 		subsys = CMD_SUBSYSTEM_ETH;
1316 		opcode = OPCODE_ETH_TX_DESTROY;
1317 		break;
1318 	case QTYPE_RXQ:
1319 		subsys = CMD_SUBSYSTEM_ETH;
1320 		opcode = OPCODE_ETH_RX_DESTROY;
1321 		break;
1322 	case QTYPE_MCCQ:
1323 		subsys = CMD_SUBSYSTEM_COMMON;
1324 		opcode = OPCODE_COMMON_MCC_DESTROY;
1325 		break;
1326 	default:
1327 		BUG();
1328 	}
1329 
1330 	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1331 				NULL);
1332 	req->id = cpu_to_le16(q->id);
1333 
1334 	status = be_mbox_notify_wait(adapter);
1335 	q->created = false;
1336 
1337 	mutex_unlock(&adapter->mbox_lock);
1338 	return status;
1339 }
1340 
1341 /* Uses MCC */
1342 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1343 {
1344 	struct be_mcc_wrb *wrb;
1345 	struct be_cmd_req_q_destroy *req;
1346 	int status;
1347 
1348 	spin_lock_bh(&adapter->mcc_lock);
1349 
1350 	wrb = wrb_from_mccq(adapter);
1351 	if (!wrb) {
1352 		status = -EBUSY;
1353 		goto err;
1354 	}
1355 	req = embedded_payload(wrb);
1356 
1357 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1358 			OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1359 	req->id = cpu_to_le16(q->id);
1360 
1361 	status = be_mcc_notify_wait(adapter);
1362 	q->created = false;
1363 
1364 err:
1365 	spin_unlock_bh(&adapter->mcc_lock);
1366 	return status;
1367 }
1368 
1369 /* Create an rx filtering policy configuration on an i/f
1370  * Will use MBOX only if MCCQ has not been created.
1371  */
1372 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1373 		     u32 *if_handle, u32 domain)
1374 {
1375 	struct be_mcc_wrb wrb = {0};
1376 	struct be_cmd_req_if_create *req;
1377 	int status;
1378 
1379 	req = embedded_payload(&wrb);
1380 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1381 		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
1382 	req->hdr.domain = domain;
1383 	req->capability_flags = cpu_to_le32(cap_flags);
1384 	req->enable_flags = cpu_to_le32(en_flags);
1385 	req->pmac_invalid = true;
1386 
1387 	status = be_cmd_notify_wait(adapter, &wrb);
1388 	if (!status) {
1389 		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1390 		*if_handle = le32_to_cpu(resp->interface_id);
1391 
1392 		/* Hack to retrieve VF's pmac-id on BE3 */
1393 		if (BE3_chip(adapter) && !be_physfn(adapter))
1394 			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1395 	}
1396 	return status;
1397 }
1398 
1399 /* Uses MCCQ */
1400 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1401 {
1402 	struct be_mcc_wrb *wrb;
1403 	struct be_cmd_req_if_destroy *req;
1404 	int status;
1405 
1406 	if (interface_id == -1)
1407 		return 0;
1408 
1409 	spin_lock_bh(&adapter->mcc_lock);
1410 
1411 	wrb = wrb_from_mccq(adapter);
1412 	if (!wrb) {
1413 		status = -EBUSY;
1414 		goto err;
1415 	}
1416 	req = embedded_payload(wrb);
1417 
1418 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1419 		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1420 	req->hdr.domain = domain;
1421 	req->interface_id = cpu_to_le32(interface_id);
1422 
1423 	status = be_mcc_notify_wait(adapter);
1424 err:
1425 	spin_unlock_bh(&adapter->mcc_lock);
1426 	return status;
1427 }
1428 
/* Get stats is a non-embedded command: the request is not embedded inside
 * the WRB but is a separate dma memory block.
 * Uses asynchronous MCC.
 */
1433 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1434 {
1435 	struct be_mcc_wrb *wrb;
1436 	struct be_cmd_req_hdr *hdr;
1437 	int status = 0;
1438 
1439 	spin_lock_bh(&adapter->mcc_lock);
1440 
1441 	wrb = wrb_from_mccq(adapter);
1442 	if (!wrb) {
1443 		status = -EBUSY;
1444 		goto err;
1445 	}
1446 	hdr = nonemb_cmd->va;
1447 
1448 	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1449 		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1450 
	/* BE2 supports only v0 of the cmd; BE3 and Lancer support v1;
	 * everything newer (Skyhawk onwards) supports v2. The chained
	 * else-if keeps BE2 from falling through to the final else.
	 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;
1458 
1459 	be_mcc_notify(adapter);
1460 	adapter->stats_cmd_sent = true;
1461 
1462 err:
1463 	spin_unlock_bh(&adapter->mcc_lock);
1464 	return status;
1465 }
1466 
1467 /* Lancer Stats */
1468 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1469 				struct be_dma_mem *nonemb_cmd)
1470 {
1472 	struct be_mcc_wrb *wrb;
1473 	struct lancer_cmd_req_pport_stats *req;
1474 	int status = 0;
1475 
1476 	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1477 			    CMD_SUBSYSTEM_ETH))
1478 		return -EPERM;
1479 
1480 	spin_lock_bh(&adapter->mcc_lock);
1481 
1482 	wrb = wrb_from_mccq(adapter);
1483 	if (!wrb) {
1484 		status = -EBUSY;
1485 		goto err;
1486 	}
1487 	req = nonemb_cmd->va;
1488 
1489 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1490 			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1491 			nonemb_cmd);
1492 
1493 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1494 	req->cmd_params.params.reset_stats = 0;
1495 
1496 	be_mcc_notify(adapter);
1497 	adapter->stats_cmd_sent = true;
1498 
1499 err:
1500 	spin_unlock_bh(&adapter->mcc_lock);
1501 	return status;
1502 }
1503 
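/* Convert the FW's PHY_LINK_SPEED_* encoding into a speed in Mbps */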
1504 static int be_mac_to_link_speed(int mac_speed)
1505 {
1506 	switch (mac_speed) {
1507 	case PHY_LINK_SPEED_ZERO:
1508 		return 0;
1509 	case PHY_LINK_SPEED_10MBPS:
1510 		return 10;
1511 	case PHY_LINK_SPEED_100MBPS:
1512 		return 100;
1513 	case PHY_LINK_SPEED_1GBPS:
1514 		return 1000;
1515 	case PHY_LINK_SPEED_10GBPS:
1516 		return 10000;
1517 	case PHY_LINK_SPEED_20GBPS:
1518 		return 20000;
1519 	case PHY_LINK_SPEED_25GBPS:
1520 		return 25000;
1521 	case PHY_LINK_SPEED_40GBPS:
1522 		return 40000;
1523 	}
1524 	return 0;
1525 }
1526 
1527 /* Uses synchronous mcc
1528  * Returns link_speed in Mbps
1529  */
1530 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1531 			     u8 *link_status, u32 dom)
1532 {
1533 	struct be_mcc_wrb *wrb;
1534 	struct be_cmd_req_link_status *req;
1535 	int status;
1536 
1537 	spin_lock_bh(&adapter->mcc_lock);
1538 
1539 	if (link_status)
1540 		*link_status = LINK_DOWN;
1541 
1542 	wrb = wrb_from_mccq(adapter);
1543 	if (!wrb) {
1544 		status = -EBUSY;
1545 		goto err;
1546 	}
1547 	req = embedded_payload(wrb);
1548 
1549 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1550 		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1551 
	/* version 1 of the cmd is supported on all chips except BE2 */
1553 	if (!BE2_chip(adapter))
1554 		req->hdr.version = 1;
1555 
1556 	req->hdr.domain = dom;
1557 
1558 	status = be_mcc_notify_wait(adapter);
1559 	if (!status) {
1560 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1561 		if (link_speed) {
1562 			*link_speed = resp->link_speed ?
1563 				      le16_to_cpu(resp->link_speed) * 10 :
1564 				      be_mac_to_link_speed(resp->mac_speed);
1565 
1566 			if (!resp->logical_link_status)
1567 				*link_speed = 0;
1568 		}
1569 		if (link_status)
1570 			*link_status = resp->logical_link_status;
1571 	}
1572 
1573 err:
1574 	spin_unlock_bh(&adapter->mcc_lock);
1575 	return status;
1576 }
1577 
/* Uses async mcc; the response is consumed by be_mcc_compl_process() */
1579 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1580 {
1581 	struct be_mcc_wrb *wrb;
1582 	struct be_cmd_req_get_cntl_addnl_attribs *req;
1583 	int status = 0;
1584 
1585 	spin_lock_bh(&adapter->mcc_lock);
1586 
1587 	wrb = wrb_from_mccq(adapter);
1588 	if (!wrb) {
1589 		status = -EBUSY;
1590 		goto err;
1591 	}
1592 	req = embedded_payload(wrb);
1593 
1594 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1595 		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1596 		wrb, NULL);
1597 
1598 	be_mcc_notify(adapter);
1599 
1600 err:
1601 	spin_unlock_bh(&adapter->mcc_lock);
1602 	return status;
1603 }
1604 
1605 /* Uses synchronous mcc */
1606 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1607 {
1608 	struct be_mcc_wrb *wrb;
1609 	struct be_cmd_req_get_fat *req;
1610 	int status;
1611 
1612 	spin_lock_bh(&adapter->mcc_lock);
1613 
1614 	wrb = wrb_from_mccq(adapter);
1615 	if (!wrb) {
1616 		status = -EBUSY;
1617 		goto err;
1618 	}
1619 	req = embedded_payload(wrb);
1620 
1621 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1622 		OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1623 	req->fat_operation = cpu_to_le32(QUERY_FAT);
1624 	status = be_mcc_notify_wait(adapter);
1625 	if (!status) {
1626 		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1627 		if (log_size && resp->log_size)
1628 			*log_size = le32_to_cpu(resp->log_size) -
1629 					sizeof(u32);
1630 	}
1631 err:
1632 	spin_unlock_bh(&adapter->mcc_lock);
1633 	return status;
1634 }
1635 
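/* Copies the adapter's FAT log into buf, 60KB at a time; this backs the
 * ethtool registers dump.
 */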
1636 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1637 {
1638 	struct be_dma_mem get_fat_cmd;
1639 	struct be_mcc_wrb *wrb;
1640 	struct be_cmd_req_get_fat *req;
1641 	u32 offset = 0, total_size, buf_size,
1642 				log_offset = sizeof(u32), payload_len;
1643 	int status;
1644 
1645 	if (buf_len == 0)
1646 		return;
1647 
1648 	total_size = buf_len;
1649 
1650 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1651 	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1652 			get_fat_cmd.size,
1653 			&get_fat_cmd.dma);
1654 	if (!get_fat_cmd.va) {
1655 		status = -ENOMEM;
1656 		dev_err(&adapter->pdev->dev,
1657 		"Memory allocation failure while retrieving FAT data\n");
1658 		return;
1659 	}
1660 
1661 	spin_lock_bh(&adapter->mcc_lock);
1662 
1663 	while (total_size) {
1664 		buf_size = min(total_size, (u32)60*1024);
1665 		total_size -= buf_size;
1666 
1667 		wrb = wrb_from_mccq(adapter);
1668 		if (!wrb) {
1669 			status = -EBUSY;
1670 			goto err;
1671 		}
1672 		req = get_fat_cmd.va;
1673 
1674 		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1675 		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1676 				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1677 				&get_fat_cmd);
1678 
1679 		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1680 		req->read_log_offset = cpu_to_le32(log_offset);
1681 		req->read_log_length = cpu_to_le32(buf_size);
1682 		req->data_buffer_size = cpu_to_le32(buf_size);
1683 
1684 		status = be_mcc_notify_wait(adapter);
1685 		if (!status) {
1686 			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1687 			memcpy(buf + offset,
1688 				resp->data_buffer,
1689 				le32_to_cpu(resp->read_log_length));
1690 		} else {
1691 			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1692 			goto err;
1693 		}
1694 		offset += buf_size;
1695 		log_offset += buf_size;
1696 	}
1697 err:
1698 	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1699 			get_fat_cmd.va,
1700 			get_fat_cmd.dma);
1701 	spin_unlock_bh(&adapter->mcc_lock);
1702 }
1703 
1704 /* Uses synchronous mcc */
1705 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1706 			char *fw_on_flash)
1707 {
1708 	struct be_mcc_wrb *wrb;
1709 	struct be_cmd_req_get_fw_version *req;
1710 	int status;
1711 
1712 	spin_lock_bh(&adapter->mcc_lock);
1713 
1714 	wrb = wrb_from_mccq(adapter);
1715 	if (!wrb) {
1716 		status = -EBUSY;
1717 		goto err;
1718 	}
1719 
1720 	req = embedded_payload(wrb);
1721 
1722 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1723 		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1724 	status = be_mcc_notify_wait(adapter);
1725 	if (!status) {
1726 		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1727 		strcpy(fw_ver, resp->firmware_version_string);
1728 		if (fw_on_flash)
1729 			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1730 	}
1731 err:
1732 	spin_unlock_bh(&adapter->mcc_lock);
1733 	return status;
1734 }
1735 
/* Set the delay interval of each EQ in set_eqd[] to the specified value.
 * Uses async mcc.
 */
1739 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1740 		      int num)
1741 {
1742 	struct be_mcc_wrb *wrb;
1743 	struct be_cmd_req_modify_eq_delay *req;
1744 	int status = 0, i;
1745 
1746 	spin_lock_bh(&adapter->mcc_lock);
1747 
1748 	wrb = wrb_from_mccq(adapter);
1749 	if (!wrb) {
1750 		status = -EBUSY;
1751 		goto err;
1752 	}
1753 	req = embedded_payload(wrb);
1754 
1755 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1756 		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1757 
1758 	req->num_eq = cpu_to_le32(num);
1759 	for (i = 0; i < num; i++) {
1760 		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1761 		req->set_eqd[i].phase = 0;
1762 		req->set_eqd[i].delay_multiplier =
1763 				cpu_to_le32(set_eqd[i].delay_multiplier);
1764 	}
1765 
1766 	be_mcc_notify(adapter);
1767 err:
1768 	spin_unlock_bh(&adapter->mcc_lock);
1769 	return status;
1770 }
1771 
/* Uses synchronous mcc */
1773 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1774 		       u32 num, bool promiscuous)
1775 {
1776 	struct be_mcc_wrb *wrb;
1777 	struct be_cmd_req_vlan_config *req;
1778 	int status;
1779 
1780 	spin_lock_bh(&adapter->mcc_lock);
1781 
1782 	wrb = wrb_from_mccq(adapter);
1783 	if (!wrb) {
1784 		status = -EBUSY;
1785 		goto err;
1786 	}
1787 	req = embedded_payload(wrb);
1788 
1789 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1790 		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1791 
1792 	req->interface_id = if_id;
1793 	req->promiscuous = promiscuous;
1794 	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1795 	req->num_vlan = num;
1796 	if (!promiscuous) {
1797 		memcpy(req->normal_vlan, vtag_array,
1798 			req->num_vlan * sizeof(vtag_array[0]));
1799 	}
1800 
1801 	status = be_mcc_notify_wait(adapter);
1802 
1803 err:
1804 	spin_unlock_bh(&adapter->mcc_lock);
1805 	return status;
1806 }
1807 
1808 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1809 {
1810 	struct be_mcc_wrb *wrb;
1811 	struct be_dma_mem *mem = &adapter->rx_filter;
1812 	struct be_cmd_req_rx_filter *req = mem->va;
1813 	int status;
1814 
1815 	spin_lock_bh(&adapter->mcc_lock);
1816 
1817 	wrb = wrb_from_mccq(adapter);
1818 	if (!wrb) {
1819 		status = -EBUSY;
1820 		goto err;
1821 	}
1822 	memset(req, 0, sizeof(*req));
1823 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1824 				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1825 				wrb, mem);
1826 
1827 	req->if_id = cpu_to_le32(adapter->if_handle);
1828 	if (flags & IFF_PROMISC) {
1829 		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1830 					BE_IF_FLAGS_VLAN_PROMISCUOUS |
1831 					BE_IF_FLAGS_MCAST_PROMISCUOUS);
1832 		if (value == ON)
1833 			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1834 						BE_IF_FLAGS_VLAN_PROMISCUOUS |
1835 						BE_IF_FLAGS_MCAST_PROMISCUOUS);
1836 	} else if (flags & IFF_ALLMULTI) {
1837 		req->if_flags_mask = req->if_flags =
1838 				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1839 	} else if (flags & BE_FLAGS_VLAN_PROMISC) {
1840 		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1841 
1842 		if (value == ON)
1843 			req->if_flags =
1844 				cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1845 	} else {
1846 		struct netdev_hw_addr *ha;
1847 		int i = 0;
1848 
1849 		req->if_flags_mask = req->if_flags =
1850 				cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1851 
1852 		/* Reset mcast promisc mode if already set by setting mask
1853 		 * and not setting flags field
1854 		 */
1855 		req->if_flags_mask |=
1856 			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1857 				    be_if_cap_flags(adapter));
1858 		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1859 		netdev_for_each_mc_addr(ha, adapter->netdev)
1860 			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1861 	}
1862 
1863 	if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
1864 	     req->if_flags_mask) {
1865 		dev_warn(&adapter->pdev->dev,
1866 			 "Cannot set rx filter flags 0x%x\n",
1867 			 req->if_flags_mask);
1868 		dev_warn(&adapter->pdev->dev,
1869 			 "Interface is capable of 0x%x flags only\n",
1870 			 be_if_cap_flags(adapter));
1871 	}
1872 	req->if_flags_mask &= cpu_to_le32(be_if_cap_flags(adapter));
1873 
1874 	status = be_mcc_notify_wait(adapter);
1875 
1876 err:
1877 	spin_unlock_bh(&adapter->mcc_lock);
1878 	return status;
1879 }
1880 
/* Uses synchronous mcc */
1882 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1883 {
1884 	struct be_mcc_wrb *wrb;
1885 	struct be_cmd_req_set_flow_control *req;
1886 	int status;
1887 
1888 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1889 			    CMD_SUBSYSTEM_COMMON))
1890 		return -EPERM;
1891 
1892 	spin_lock_bh(&adapter->mcc_lock);
1893 
1894 	wrb = wrb_from_mccq(adapter);
1895 	if (!wrb) {
1896 		status = -EBUSY;
1897 		goto err;
1898 	}
1899 	req = embedded_payload(wrb);
1900 
1901 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1902 		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1903 
1904 	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1905 	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1906 
1907 	status = be_mcc_notify_wait(adapter);
1908 
1909 err:
1910 	spin_unlock_bh(&adapter->mcc_lock);
1911 	return status;
1912 }
1913 
/* Uses sync mcc */
1915 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1916 {
1917 	struct be_mcc_wrb *wrb;
1918 	struct be_cmd_req_get_flow_control *req;
1919 	int status;
1920 
1921 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1922 			    CMD_SUBSYSTEM_COMMON))
1923 		return -EPERM;
1924 
1925 	spin_lock_bh(&adapter->mcc_lock);
1926 
1927 	wrb = wrb_from_mccq(adapter);
1928 	if (!wrb) {
1929 		status = -EBUSY;
1930 		goto err;
1931 	}
1932 	req = embedded_payload(wrb);
1933 
1934 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1935 		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1936 
1937 	status = be_mcc_notify_wait(adapter);
1938 	if (!status) {
1939 		struct be_cmd_resp_get_flow_control *resp =
1940 						embedded_payload(wrb);
1941 		*tx_fc = le16_to_cpu(resp->tx_flow_control);
1942 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
1943 	}
1944 
1945 err:
1946 	spin_unlock_bh(&adapter->mcc_lock);
1947 	return status;
1948 }
1949 
1950 /* Uses mbox */
1951 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1952 			u32 *mode, u32 *caps, u16 *asic_rev)
1953 {
1954 	struct be_mcc_wrb *wrb;
1955 	struct be_cmd_req_query_fw_cfg *req;
1956 	int status;
1957 
1958 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1959 		return -1;
1960 
1961 	wrb = wrb_from_mbox(adapter);
1962 	req = embedded_payload(wrb);
1963 
1964 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1965 		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1966 
1967 	status = be_mbox_notify_wait(adapter);
1968 	if (!status) {
1969 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1970 		*port_num = le32_to_cpu(resp->phys_port);
1971 		*mode = le32_to_cpu(resp->function_mode);
1972 		*caps = le32_to_cpu(resp->function_caps);
1973 		*asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
1974 	}
1975 
1976 	mutex_unlock(&adapter->mbox_lock);
1977 	return status;
1978 }
1979 
1980 /* Uses mbox */
1981 int be_cmd_reset_function(struct be_adapter *adapter)
1982 {
1983 	struct be_mcc_wrb *wrb;
1984 	struct be_cmd_req_hdr *req;
1985 	int status;
1986 
1987 	if (lancer_chip(adapter)) {
1988 		status = lancer_wait_ready(adapter);
1989 		if (!status) {
1990 			iowrite32(SLI_PORT_CONTROL_IP_MASK,
1991 				  adapter->db + SLIPORT_CONTROL_OFFSET);
1992 			status = lancer_test_and_set_rdy_state(adapter);
1993 		}
1994 		if (status) {
1995 			dev_err(&adapter->pdev->dev,
1996 				"Adapter in non-recoverable error state\n");
1997 		}
1998 		return status;
1999 	}
2000 
2001 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2002 		return -1;
2003 
2004 	wrb = wrb_from_mbox(adapter);
2005 	req = embedded_payload(wrb);
2006 
2007 	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2008 		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
2009 
2010 	status = be_mbox_notify_wait(adapter);
2011 
2012 	mutex_unlock(&adapter->mbox_lock);
2013 	return status;
2014 }
2015 
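/* Uses mbox */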
2016 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2017 			u32 rss_hash_opts, u16 table_size)
2018 {
2019 	struct be_mcc_wrb *wrb;
2020 	struct be_cmd_req_rss_config *req;
2021 	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
2022 			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
2023 			0x3ea83c02, 0x4a110304};
2024 	int status;
2025 
2026 	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2027 		return 0;
2028 
2029 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2030 		return -1;
2031 
2032 	wrb = wrb_from_mbox(adapter);
2033 	req = embedded_payload(wrb);
2034 
2035 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2036 		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2037 
2038 	req->if_id = cpu_to_le32(adapter->if_handle);
2039 	req->enable_rss = cpu_to_le16(rss_hash_opts);
2040 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2041 
2042 	if (lancer_chip(adapter) || skyhawk_chip(adapter))
2043 		req->hdr.version = 1;
2044 
2045 	memcpy(req->cpu_table, rsstable, table_size);
2046 	memcpy(req->hash, myhash, sizeof(myhash));
2047 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2048 
2049 	status = be_mbox_notify_wait(adapter);
2050 
2051 	mutex_unlock(&adapter->mbox_lock);
2052 	return status;
2053 }
2054 
2055 /* Uses sync mcc */
2056 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2057 			u8 bcn, u8 sts, u8 state)
2058 {
2059 	struct be_mcc_wrb *wrb;
2060 	struct be_cmd_req_enable_disable_beacon *req;
2061 	int status;
2062 
2063 	spin_lock_bh(&adapter->mcc_lock);
2064 
2065 	wrb = wrb_from_mccq(adapter);
2066 	if (!wrb) {
2067 		status = -EBUSY;
2068 		goto err;
2069 	}
2070 	req = embedded_payload(wrb);
2071 
2072 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2073 		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
2074 
2075 	req->port_num = port_num;
2076 	req->beacon_state = state;
2077 	req->beacon_duration = bcn;
2078 	req->status_duration = sts;
2079 
2080 	status = be_mcc_notify_wait(adapter);
2081 
2082 err:
2083 	spin_unlock_bh(&adapter->mcc_lock);
2084 	return status;
2085 }
2086 
2087 /* Uses sync mcc */
2088 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2089 {
2090 	struct be_mcc_wrb *wrb;
2091 	struct be_cmd_req_get_beacon_state *req;
2092 	int status;
2093 
2094 	spin_lock_bh(&adapter->mcc_lock);
2095 
2096 	wrb = wrb_from_mccq(adapter);
2097 	if (!wrb) {
2098 		status = -EBUSY;
2099 		goto err;
2100 	}
2101 	req = embedded_payload(wrb);
2102 
2103 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2104 		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
2105 
2106 	req->port_num = port_num;
2107 
2108 	status = be_mcc_notify_wait(adapter);
2109 	if (!status) {
2110 		struct be_cmd_resp_get_beacon_state *resp =
2111 						embedded_payload(wrb);
2112 		*state = resp->beacon_state;
2113 	}
2114 
2115 err:
2116 	spin_unlock_bh(&adapter->mcc_lock);
2117 	return status;
2118 }
2119 
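/* Uses the MCC queue; the caller then sleeps on et_cmd_compl until the
 * flash write completes or a 60-second timeout expires
 */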
2120 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2121 			    u32 data_size, u32 data_offset,
2122 			    const char *obj_name, u32 *data_written,
2123 			    u8 *change_status, u8 *addn_status)
2124 {
2125 	struct be_mcc_wrb *wrb;
2126 	struct lancer_cmd_req_write_object *req;
2127 	struct lancer_cmd_resp_write_object *resp;
2128 	void *ctxt = NULL;
2129 	int status;
2130 
2131 	spin_lock_bh(&adapter->mcc_lock);
2132 	adapter->flash_status = 0;
2133 
2134 	wrb = wrb_from_mccq(adapter);
2135 	if (!wrb) {
2136 		status = -EBUSY;
2137 		goto err_unlock;
2138 	}
2139 
2140 	req = embedded_payload(wrb);
2141 
2142 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2143 				OPCODE_COMMON_WRITE_OBJECT,
2144 				sizeof(struct lancer_cmd_req_write_object), wrb,
2145 				NULL);
2146 
2147 	ctxt = &req->context;
2148 	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2149 			write_length, ctxt, data_size);
2150 
2151 	/* A zero-length chunk marks end-of-file */
2152 	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2153 			eof, ctxt, data_size == 0);
2157 
2158 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
2159 	req->write_offset = cpu_to_le32(data_offset);
2160 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2161 	req->descriptor_count = cpu_to_le32(1);
2162 	req->buf_len = cpu_to_le32(data_size);
2163 	req->addr_low = cpu_to_le32((cmd->dma +
2164 				sizeof(struct lancer_cmd_req_write_object))
2165 				& 0xFFFFFFFF);
2166 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2167 				sizeof(struct lancer_cmd_req_write_object)));
2168 
2169 	be_mcc_notify(adapter);
2170 	spin_unlock_bh(&adapter->mcc_lock);
2171 
2172 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2173 					 msecs_to_jiffies(60000)))
2174 		status = -ETIMEDOUT;
2175 	else
2176 		status = adapter->flash_status;
2177 
2178 	resp = embedded_payload(wrb);
2179 	if (!status) {
2180 		*data_written = le32_to_cpu(resp->actual_write_len);
2181 		*change_status = resp->change_status;
2182 	} else {
2183 		*addn_status = resp->additional_status;
2184 	}
2185 
2186 	return status;
2187 
2188 err_unlock:
2189 	spin_unlock_bh(&adapter->mcc_lock);
2190 	return status;
2191 }
2192 
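/* Uses sync mcc */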
2193 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2194 		u32 data_size, u32 data_offset, const char *obj_name,
2195 		u32 *data_read, u32 *eof, u8 *addn_status)
2196 {
2197 	struct be_mcc_wrb *wrb;
2198 	struct lancer_cmd_req_read_object *req;
2199 	struct lancer_cmd_resp_read_object *resp;
2200 	int status;
2201 
2202 	spin_lock_bh(&adapter->mcc_lock);
2203 
2204 	wrb = wrb_from_mccq(adapter);
2205 	if (!wrb) {
2206 		status = -EBUSY;
2207 		goto err_unlock;
2208 	}
2209 
2210 	req = embedded_payload(wrb);
2211 
2212 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2213 			OPCODE_COMMON_READ_OBJECT,
2214 			sizeof(struct lancer_cmd_req_read_object), wrb,
2215 			NULL);
2216 
2217 	req->desired_read_len = cpu_to_le32(data_size);
2218 	req->read_offset = cpu_to_le32(data_offset);
2219 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2220 	req->descriptor_count = cpu_to_le32(1);
2221 	req->buf_len = cpu_to_le32(data_size);
2222 	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2223 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2224 
2225 	status = be_mcc_notify_wait(adapter);
2226 
2227 	resp = embedded_payload(wrb);
2228 	if (!status) {
2229 		*data_read = le32_to_cpu(resp->actual_read_len);
2230 		*eof = le32_to_cpu(resp->eof);
2231 	} else {
2232 		*addn_status = resp->additional_status;
2233 	}
2234 
2235 err_unlock:
2236 	spin_unlock_bh(&adapter->mcc_lock);
2237 	return status;
2238 }
2239 
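/* Uses the MCC queue; the caller then sleeps on et_cmd_compl until the
 * flash operation completes or a 40-second timeout expires
 */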
2240 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2241 			u32 flash_type, u32 flash_opcode, u32 buf_size)
2242 {
2243 	struct be_mcc_wrb *wrb;
2244 	struct be_cmd_write_flashrom *req;
2245 	int status;
2246 
2247 	spin_lock_bh(&adapter->mcc_lock);
2248 	adapter->flash_status = 0;
2249 
2250 	wrb = wrb_from_mccq(adapter);
2251 	if (!wrb) {
2252 		status = -EBUSY;
2253 		goto err_unlock;
2254 	}
2255 	req = cmd->va;
2256 
2257 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2258 		OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2259 
2260 	req->params.op_type = cpu_to_le32(flash_type);
2261 	req->params.op_code = cpu_to_le32(flash_opcode);
2262 	req->params.data_buf_size = cpu_to_le32(buf_size);
2263 
2264 	be_mcc_notify(adapter);
2265 	spin_unlock_bh(&adapter->mcc_lock);
2266 
2267 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2268 					 msecs_to_jiffies(40000)))
2269 		status = -ETIMEDOUT;
2270 	else
2271 		status = adapter->flash_status;
2272 
2273 	return status;
2274 
2275 err_unlock:
2276 	spin_unlock_bh(&adapter->mcc_lock);
2277 	return status;
2278 }
2279 
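/* Uses sync mcc */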
2280 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2281 			 int offset)
2282 {
2283 	struct be_mcc_wrb *wrb;
2284 	struct be_cmd_read_flash_crc *req;
2285 	int status;
2286 
2287 	spin_lock_bh(&adapter->mcc_lock);
2288 
2289 	wrb = wrb_from_mccq(adapter);
2290 	if (!wrb) {
2291 		status = -EBUSY;
2292 		goto err;
2293 	}
2294 	req = embedded_payload(wrb);
2295 
2296 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2297 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2298 			       wrb, NULL);
2299 
2300 	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2301 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2302 	req->params.offset = cpu_to_le32(offset);
2303 	req->params.data_buf_size = cpu_to_le32(0x4);
2304 
2305 	status = be_mcc_notify_wait(adapter);
2306 	if (!status)
2307 		memcpy(flashed_crc, req->crc, 4);
2308 
2309 err:
2310 	spin_unlock_bh(&adapter->mcc_lock);
2311 	return status;
2312 }
2313 
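/* Uses sync mcc */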
2314 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2315 				struct be_dma_mem *nonemb_cmd)
2316 {
2317 	struct be_mcc_wrb *wrb;
2318 	struct be_cmd_req_acpi_wol_magic_config *req;
2319 	int status;
2320 
2321 	spin_lock_bh(&adapter->mcc_lock);
2322 
2323 	wrb = wrb_from_mccq(adapter);
2324 	if (!wrb) {
2325 		status = -EBUSY;
2326 		goto err;
2327 	}
2328 	req = nonemb_cmd->va;
2329 
2330 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2331 		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2332 		nonemb_cmd);
2333 	memcpy(req->magic_mac, mac, ETH_ALEN);
2334 
2335 	status = be_mcc_notify_wait(adapter);
2336 
2337 err:
2338 	spin_unlock_bh(&adapter->mcc_lock);
2339 	return status;
2340 }
2341 
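/* Uses sync mcc */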
2342 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2343 			u8 loopback_type, u8 enable)
2344 {
2345 	struct be_mcc_wrb *wrb;
2346 	struct be_cmd_req_set_lmode *req;
2347 	int status;
2348 
2349 	spin_lock_bh(&adapter->mcc_lock);
2350 
2351 	wrb = wrb_from_mccq(adapter);
2352 	if (!wrb) {
2353 		status = -EBUSY;
2354 		goto err;
2355 	}
2356 
2357 	req = embedded_payload(wrb);
2358 
2359 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2360 			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2361 			NULL);
2362 
2363 	req->src_port = port_num;
2364 	req->dest_port = port_num;
2365 	req->loopback_type = loopback_type;
2366 	req->loopback_state = enable;
2367 
2368 	status = be_mcc_notify_wait(adapter);
2369 err:
2370 	spin_unlock_bh(&adapter->mcc_lock);
2371 	return status;
2372 }
2373 
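/* Uses the MCC queue; the caller then sleeps on et_cmd_compl until the
 * loopback test completes
 */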
2374 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2375 		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2376 {
2377 	struct be_mcc_wrb *wrb;
2378 	struct be_cmd_req_loopback_test *req;
2379 	struct be_cmd_resp_loopback_test *resp;
2380 	int status;
2381 
2382 	spin_lock_bh(&adapter->mcc_lock);
2383 
2384 	wrb = wrb_from_mccq(adapter);
2385 	if (!wrb) {
2386 		status = -EBUSY;
2387 		goto err;
2388 	}
2389 
2390 	req = embedded_payload(wrb);
2391 
2392 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2393 			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2394 
2395 	req->hdr.timeout = cpu_to_le32(15);
2396 	req->pattern = cpu_to_le64(pattern);
2397 	req->src_port = cpu_to_le32(port_num);
2398 	req->dest_port = cpu_to_le32(port_num);
2399 	req->pkt_size = cpu_to_le32(pkt_size);
2400 	req->num_pkts = cpu_to_le32(num_pkts);
2401 	req->loopback_type = cpu_to_le32(loopback_type);
2402 
2403 	be_mcc_notify(adapter);
2404 
2405 	spin_unlock_bh(&adapter->mcc_lock);
2406 
2407 	wait_for_completion(&adapter->et_cmd_compl);
2408 	resp = embedded_payload(wrb);
2409 	status = le32_to_cpu(resp->status);
2410 
2411 	return status;
2412 err:
2413 	spin_unlock_bh(&adapter->mcc_lock);
2414 	return status;
2415 }
2416 
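/* Uses sync mcc */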
2417 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2418 				u32 byte_cnt, struct be_dma_mem *cmd)
2419 {
2420 	struct be_mcc_wrb *wrb;
2421 	struct be_cmd_req_ddrdma_test *req;
2422 	int status;
2423 	int i, j = 0;
2424 
2425 	spin_lock_bh(&adapter->mcc_lock);
2426 
2427 	wrb = wrb_from_mccq(adapter);
2428 	if (!wrb) {
2429 		status = -EBUSY;
2430 		goto err;
2431 	}
2432 	req = cmd->va;
2433 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2434 			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2435 
2436 	req->pattern = cpu_to_le64(pattern);
2437 	req->byte_count = cpu_to_le32(byte_cnt);
2438 	for (i = 0; i < byte_cnt; i++) {
2439 		req->snd_buff[i] = (u8)(pattern >> (j*8));
2440 		j++;
2441 		if (j > 7)
2442 			j = 0;
2443 	}
2444 
2445 	status = be_mcc_notify_wait(adapter);
2446 
2447 	if (!status) {
2448 		struct be_cmd_resp_ddrdma_test *resp = cmd->va;
2449 
2450 		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2451 				resp->snd_err) {
2452 			status = -1;
2453 		}
2454 	}
2455 
2456 err:
2457 	spin_unlock_bh(&adapter->mcc_lock);
2458 	return status;
2459 }
2460 
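/* Uses sync mcc */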
2461 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2462 				struct be_dma_mem *nonemb_cmd)
2463 {
2464 	struct be_mcc_wrb *wrb;
2465 	struct be_cmd_req_seeprom_read *req;
2466 	int status;
2467 
2468 	spin_lock_bh(&adapter->mcc_lock);
2469 
2470 	wrb = wrb_from_mccq(adapter);
2471 	if (!wrb) {
2472 		status = -EBUSY;
2473 		goto err;
2474 	}
2475 	req = nonemb_cmd->va;
2476 
2477 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2478 			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2479 			nonemb_cmd);
2480 
2481 	status = be_mcc_notify_wait(adapter);
2482 
2483 err:
2484 	spin_unlock_bh(&adapter->mcc_lock);
2485 	return status;
2486 }
2487 
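/* Uses sync mcc */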
2488 int be_cmd_get_phy_info(struct be_adapter *adapter)
2489 {
2490 	struct be_mcc_wrb *wrb;
2491 	struct be_cmd_req_get_phy_info *req;
2492 	struct be_dma_mem cmd;
2493 	int status;
2494 
2495 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2496 			    CMD_SUBSYSTEM_COMMON))
2497 		return -EPERM;
2498 
2499 	spin_lock_bh(&adapter->mcc_lock);
2500 
2501 	wrb = wrb_from_mccq(adapter);
2502 	if (!wrb) {
2503 		status = -EBUSY;
2504 		goto err;
2505 	}
2506 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2507 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2508 					&cmd.dma);
2509 	if (!cmd.va) {
2510 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2511 		status = -ENOMEM;
2512 		goto err;
2513 	}
2514 
2515 	req = cmd.va;
2516 
2517 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2518 			OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2519 			wrb, &cmd);
2520 
2521 	status = be_mcc_notify_wait(adapter);
2522 	if (!status) {
2523 		struct be_phy_info *resp_phy_info =
2524 				cmd.va + sizeof(struct be_cmd_req_hdr);
2525 		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2526 		adapter->phy.interface_type =
2527 			le16_to_cpu(resp_phy_info->interface_type);
2528 		adapter->phy.auto_speeds_supported =
2529 			le16_to_cpu(resp_phy_info->auto_speeds_supported);
2530 		adapter->phy.fixed_speeds_supported =
2531 			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2532 		adapter->phy.misc_params =
2533 			le32_to_cpu(resp_phy_info->misc_params);
2534 
2535 		if (BE2_chip(adapter)) {
2536 			adapter->phy.fixed_speeds_supported =
2537 				BE_SUPPORTED_SPEED_10GBPS |
2538 				BE_SUPPORTED_SPEED_1GBPS;
2539 		}
2540 	}
2541 	pci_free_consistent(adapter->pdev, cmd.size,
2542 				cmd.va, cmd.dma);
2543 err:
2544 	spin_unlock_bh(&adapter->mcc_lock);
2545 	return status;
2546 }
2547 
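/* Uses sync mcc */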
2548 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2549 {
2550 	struct be_mcc_wrb *wrb;
2551 	struct be_cmd_req_set_qos *req;
2552 	int status;
2553 
2554 	spin_lock_bh(&adapter->mcc_lock);
2555 
2556 	wrb = wrb_from_mccq(adapter);
2557 	if (!wrb) {
2558 		status = -EBUSY;
2559 		goto err;
2560 	}
2561 
2562 	req = embedded_payload(wrb);
2563 
2564 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2565 			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2566 
2567 	req->hdr.domain = domain;
2568 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2569 	req->max_bps_nic = cpu_to_le32(bps);
2570 
2571 	status = be_mcc_notify_wait(adapter);
2572 
2573 err:
2574 	spin_unlock_bh(&adapter->mcc_lock);
2575 	return status;
2576 }
2577 
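/* Uses mbox */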
2578 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2579 {
2580 	struct be_mcc_wrb *wrb;
2581 	struct be_cmd_req_cntl_attribs *req;
2582 	struct be_cmd_resp_cntl_attribs *resp;
2583 	int status;
2584 	int payload_len = max(sizeof(*req), sizeof(*resp));
2585 	struct mgmt_controller_attrib *attribs;
2586 	struct be_dma_mem attribs_cmd;
2587 
2588 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2589 		return -1;
2590 
2591 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2592 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2593 	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2594 						&attribs_cmd.dma);
2595 	if (!attribs_cmd.va) {
2596 		dev_err(&adapter->pdev->dev,
2597 				"Memory allocation failure\n");
2598 		status = -ENOMEM;
2599 		goto err;
2600 	}
2601 
2602 	wrb = wrb_from_mbox(adapter);
2603 	if (!wrb) {
2604 		status = -EBUSY;
2605 		goto err;
2606 	}
2607 	req = attribs_cmd.va;
2608 
2609 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2610 			 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2611 			&attribs_cmd);
2612 
2613 	status = be_mbox_notify_wait(adapter);
2614 	if (!status) {
2615 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2616 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
2617 	}
2618 
2619 err:
2620 	mutex_unlock(&adapter->mbox_lock);
2621 	if (attribs_cmd.va)
2622 		pci_free_consistent(adapter->pdev, attribs_cmd.size,
2623 				    attribs_cmd.va, attribs_cmd.dma);
2624 	return status;
2625 }
2626 
2627 /* Uses mbox */
2628 int be_cmd_req_native_mode(struct be_adapter *adapter)
2629 {
2630 	struct be_mcc_wrb *wrb;
2631 	struct be_cmd_req_set_func_cap *req;
2632 	int status;
2633 
2634 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2635 		return -1;
2636 
2637 	wrb = wrb_from_mbox(adapter);
2638 	if (!wrb) {
2639 		status = -EBUSY;
2640 		goto err;
2641 	}
2642 
2643 	req = embedded_payload(wrb);
2644 
2645 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2646 		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2647 
2648 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2649 				CAPABILITY_BE3_NATIVE_ERX_API);
2650 	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2651 
2652 	status = be_mbox_notify_wait(adapter);
2653 	if (!status) {
2654 		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2655 		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2656 					CAPABILITY_BE3_NATIVE_ERX_API;
2657 		if (!adapter->be3_native)
2658 			dev_warn(&adapter->pdev->dev,
2659 				 "adapter not in advanced mode\n");
2660 	}
2661 err:
2662 	mutex_unlock(&adapter->mbox_lock);
2663 	return status;
2664 }
2665 
2666 /* Get privilege(s) for a function */
2667 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2668 			     u32 domain)
2669 {
2670 	struct be_mcc_wrb *wrb;
2671 	struct be_cmd_req_get_fn_privileges *req;
2672 	int status;
2673 
2674 	spin_lock_bh(&adapter->mcc_lock);
2675 
2676 	wrb = wrb_from_mccq(adapter);
2677 	if (!wrb) {
2678 		status = -EBUSY;
2679 		goto err;
2680 	}
2681 
2682 	req = embedded_payload(wrb);
2683 
2684 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2685 			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2686 			       wrb, NULL);
2687 
2688 	req->hdr.domain = domain;
2689 
2690 	status = be_mcc_notify_wait(adapter);
2691 	if (!status) {
2692 		struct be_cmd_resp_get_fn_privileges *resp =
2693 						embedded_payload(wrb);
2694 		*privilege = le32_to_cpu(resp->privilege_mask);
2695 	}
2696 
2697 err:
2698 	spin_unlock_bh(&adapter->mcc_lock);
2699 	return status;
2700 }
2701 
2702 /* Set privilege(s) for a function */
2703 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2704 			     u32 domain)
2705 {
2706 	struct be_mcc_wrb *wrb;
2707 	struct be_cmd_req_set_fn_privileges *req;
2708 	int status;
2709 
2710 	spin_lock_bh(&adapter->mcc_lock);
2711 
2712 	wrb = wrb_from_mccq(adapter);
2713 	if (!wrb) {
2714 		status = -EBUSY;
2715 		goto err;
2716 	}
2717 
2718 	req = embedded_payload(wrb);
2719 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2720 			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2721 			       wrb, NULL);
2722 	req->hdr.domain = domain;
2723 	if (lancer_chip(adapter))
2724 		req->privileges_lancer = cpu_to_le32(privileges);
2725 	else
2726 		req->privileges = cpu_to_le32(privileges);
2727 
2728 	status = be_mcc_notify_wait(adapter);
2729 err:
2730 	spin_unlock_bh(&adapter->mcc_lock);
2731 	return status;
2732 }
2733 
2734 /* pmac_id_valid: true => pmac_id is supplied; the MAC address for that
2735  * pmac_id is requested.
2736  * pmac_id_valid: false => an active pmac_id or MAC address is requested.
2737  * If a pmac_id is returned, *pmac_id_valid is set to true. */
2738 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2739 			     bool *pmac_id_valid, u32 *pmac_id, u8 domain)
2740 {
2741 	struct be_mcc_wrb *wrb;
2742 	struct be_cmd_req_get_mac_list *req;
2743 	int status;
2744 	int mac_count;
2745 	struct be_dma_mem get_mac_list_cmd;
2746 	int i;
2747 
2748 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2749 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2750 	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2751 			get_mac_list_cmd.size,
2752 			&get_mac_list_cmd.dma);
2753 
2754 	if (!get_mac_list_cmd.va) {
2755 		dev_err(&adapter->pdev->dev,
2756 				"Memory allocation failure during GET_MAC_LIST\n");
2757 		return -ENOMEM;
2758 	}
2759 
2760 	spin_lock_bh(&adapter->mcc_lock);
2761 
2762 	wrb = wrb_from_mccq(adapter);
2763 	if (!wrb) {
2764 		status = -EBUSY;
2765 		goto out;
2766 	}
2767 
2768 	req = get_mac_list_cmd.va;
2769 
2770 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2771 			       OPCODE_COMMON_GET_MAC_LIST,
2772 			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2773 	req->hdr.domain = domain;
2774 	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2775 	if (*pmac_id_valid) {
2776 		req->mac_id = cpu_to_le32(*pmac_id);
2777 		req->iface_id = cpu_to_le16(adapter->if_handle);
2778 		req->perm_override = 0;
2779 	} else {
2780 		req->perm_override = 1;
2781 	}
2782 
2783 	status = be_mcc_notify_wait(adapter);
2784 	if (!status) {
2785 		struct be_cmd_resp_get_mac_list *resp =
2786 						get_mac_list_cmd.va;
2787 
2788 		if (*pmac_id_valid) {
2789 			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2790 			       ETH_ALEN);
2791 			goto out;
2792 		}
2793 
2794 		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2795 		/* The MAC list returned could contain one or more active
2796 		 * mac_ids or one or more true or pseudo permanent MAC
2797 		 * addresses. If an active mac_id is present, return the
2798 		 * first active mac_id found.
2799 		 */
2800 		for (i = 0; i < mac_count; i++) {
2801 			struct get_list_macaddr *mac_entry;
2802 			u16 mac_addr_size;
2803 			u32 mac_id;
2804 
2805 			mac_entry = &resp->macaddr_list[i];
2806 			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2807 			/* mac_id is a 32-bit value and mac_addr size
2808 			 * is 6 bytes
2809 			 */
2810 			if (mac_addr_size == sizeof(u32)) {
2811 				*pmac_id_valid = true;
2812 				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2813 				*pmac_id = le32_to_cpu(mac_id);
2814 				goto out;
2815 			}
2816 		}
2817 		/* If no active mac_id found, return first mac addr */
2818 		*pmac_id_valid = false;
2819 		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2820 								ETH_ALEN);
2821 	}
2822 
2823 out:
2824 	spin_unlock_bh(&adapter->mcc_lock);
2825 	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2826 			get_mac_list_cmd.va, get_mac_list_cmd.dma);
2827 	return status;
2828 }
2829 
2830 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
2831 {
2832 	bool active = true;
2833 
2834 	if (BEx_chip(adapter))
2835 		return be_cmd_mac_addr_query(adapter, mac, false,
2836 					     adapter->if_handle, curr_pmac_id);
2837 	else
2838 		/* Fetch the MAC address using pmac_id */
2839 		return be_cmd_get_mac_from_list(adapter, mac, &active,
2840 						&curr_pmac_id, 0);
2841 }
2842 
2843 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
2844 {
2845 	int status;
2846 	bool pmac_valid = false;
2847 
2848 	memset(mac, 0, ETH_ALEN);
2849 
2850 	if (BEx_chip(adapter)) {
2851 		if (be_physfn(adapter))
2852 			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
2853 						       0);
2854 		else
2855 			status = be_cmd_mac_addr_query(adapter, mac, false,
2856 						       adapter->if_handle, 0);
2857 	} else {
2858 		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
2859 						  NULL, 0);
2860 	}
2861 
2862 	return status;
2863 }
2864 
2865 /* Uses synchronous MCCQ */
2866 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2867 			u8 mac_count, u32 domain)
2868 {
2869 	struct be_mcc_wrb *wrb;
2870 	struct be_cmd_req_set_mac_list *req;
2871 	int status;
2872 	struct be_dma_mem cmd;
2873 
2874 	memset(&cmd, 0, sizeof(struct be_dma_mem));
2875 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2876 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2877 			&cmd.dma, GFP_KERNEL);
2878 	if (!cmd.va)
2879 		return -ENOMEM;
2880 
2881 	spin_lock_bh(&adapter->mcc_lock);
2882 
2883 	wrb = wrb_from_mccq(adapter);
2884 	if (!wrb) {
2885 		status = -EBUSY;
2886 		goto err;
2887 	}
2888 
2889 	req = cmd.va;
2890 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2891 				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2892 				wrb, &cmd);
2893 
2894 	req->hdr.domain = domain;
2895 	req->mac_count = mac_count;
2896 	if (mac_count)
2897 		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2898 
2899 	status = be_mcc_notify_wait(adapter);
2900 
2901 err:
2902 	dma_free_coherent(&adapter->pdev->dev, cmd.size,
2903 				cmd.va, cmd.dma);
2904 	spin_unlock_bh(&adapter->mcc_lock);
2905 	return status;
2906 }
2907 
2908 /* Wrapper to delete any active MACs and provision the new mac.
2909  * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
2910  * current list are active.
2911  */
2912 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
2913 {
2914 	bool active_mac = false;
2915 	u8 old_mac[ETH_ALEN];
2916 	u32 pmac_id;
2917 	int status;
2918 
2919 	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
2920 					  &pmac_id, dom);
2921 	if (!status && active_mac)
2922 		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
2923 
2924 	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
2925 }
2926 
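/* Uses sync mcc */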
2927 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2928 			  u32 domain, u16 intf_id, u16 hsw_mode)
2929 {
2930 	struct be_mcc_wrb *wrb;
2931 	struct be_cmd_req_set_hsw_config *req;
2932 	void *ctxt;
2933 	int status;
2934 
2935 	spin_lock_bh(&adapter->mcc_lock);
2936 
2937 	wrb = wrb_from_mccq(adapter);
2938 	if (!wrb) {
2939 		status = -EBUSY;
2940 		goto err;
2941 	}
2942 
2943 	req = embedded_payload(wrb);
2944 	ctxt = &req->context;
2945 
2946 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2947 			OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2948 
2949 	req->hdr.domain = domain;
2950 	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2951 	if (pvid) {
2952 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2953 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2954 	}
2955 	if (!BEx_chip(adapter) && hsw_mode) {
2956 		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
2957 			      ctxt, adapter->hba_port_num);
2958 		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
2959 		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
2960 			      ctxt, hsw_mode);
2961 	}
2962 
2963 	be_dws_cpu_to_le(req->context, sizeof(req->context));
2964 	status = be_mcc_notify_wait(adapter);
2965 
2966 err:
2967 	spin_unlock_bh(&adapter->mcc_lock);
2968 	return status;
2969 }
2970 
2971 /* Get Hyper switch config */
2972 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2973 			  u32 domain, u16 intf_id, u8 *mode)
2974 {
2975 	struct be_mcc_wrb *wrb;
2976 	struct be_cmd_req_get_hsw_config *req;
2977 	void *ctxt;
2978 	int status;
2979 	u16 vid;
2980 
2981 	spin_lock_bh(&adapter->mcc_lock);
2982 
2983 	wrb = wrb_from_mccq(adapter);
2984 	if (!wrb) {
2985 		status = -EBUSY;
2986 		goto err;
2987 	}
2988 
2989 	req = embedded_payload(wrb);
2990 	ctxt = &req->context;
2991 
2992 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2993 			OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2994 
2995 	req->hdr.domain = domain;
2996 	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2997 		      ctxt, intf_id);
2998 	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2999 
3000 	if (!BEx_chip(adapter)) {
3001 		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3002 			      ctxt, adapter->hba_port_num);
3003 		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3004 	}
3005 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3006 
3007 	status = be_mcc_notify_wait(adapter);
3008 	if (!status) {
3009 		struct be_cmd_resp_get_hsw_config *resp =
3010 						embedded_payload(wrb);
3011 		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3012 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3013 				    pvid, &resp->context);
3015 		if (pvid)
3016 			*pvid = le16_to_cpu(vid);
3017 		if (mode)
3018 			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3019 					      port_fwd_type, &resp->context);
3020 	}
3021 
3022 err:
3023 	spin_unlock_bh(&adapter->mcc_lock);
3024 	return status;
3025 }
3026 
3027 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3028 {
3029 	struct be_mcc_wrb *wrb;
3030 	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3031 	int status;
3032 	int payload_len = sizeof(*req);
3033 	struct be_dma_mem cmd;
3034 
3035 	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3036 			    CMD_SUBSYSTEM_ETH))
3037 		return -EPERM;
3038 
3039 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3040 		return -1;
3041 
3042 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3043 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3044 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3045 					       &cmd.dma);
3046 	if (!cmd.va) {
3047 		dev_err(&adapter->pdev->dev,
3048 				"Memory allocation failure\n");
3049 		status = -ENOMEM;
3050 		goto err;
3051 	}
3052 
3053 	wrb = wrb_from_mbox(adapter);
3054 	if (!wrb) {
3055 		status = -EBUSY;
3056 		goto err;
3057 	}
3058 
3059 	req = cmd.va;
3060 
3061 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3062 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3063 			       payload_len, wrb, &cmd);
3064 
3065 	req->hdr.version = 1;
3066 	req->query_options = BE_GET_WOL_CAP;
3067 
3068 	status = be_mbox_notify_wait(adapter);
3069 	if (!status) {
3070 		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3071 		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
3072 
3073 		/* The command could succeed misleadingly on old f/w that is
3074 		 * not aware of the V1 version; fake an error in that case. */
3075 		if (resp->hdr.response_length < payload_len) {
3076 			status = -1;
3077 			goto err;
3078 		}
3079 		adapter->wol_cap = resp->wol_settings;
3080 	}
3081 err:
3082 	mutex_unlock(&adapter->mbox_lock);
3083 	if (cmd.va)
3084 		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3085 	return status;
3086 }
3087 
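/* Uses mbox */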
3088 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3089 				   struct be_dma_mem *cmd)
3090 {
3091 	struct be_mcc_wrb *wrb;
3092 	struct be_cmd_req_get_ext_fat_caps *req;
3093 	int status;
3094 
3095 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3096 		return -1;
3097 
3098 	wrb = wrb_from_mbox(adapter);
3099 	if (!wrb) {
3100 		status = -EBUSY;
3101 		goto err;
3102 	}
3103 
3104 	req = cmd->va;
3105 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3106 			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3107 			       cmd->size, wrb, cmd);
3108 	req->parameter_type = cpu_to_le32(1);
3109 
3110 	status = be_mbox_notify_wait(adapter);
3111 err:
3112 	mutex_unlock(&adapter->mbox_lock);
3113 	return status;
3114 }
3115 
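/* Uses sync mcc */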
3116 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3117 				   struct be_dma_mem *cmd,
3118 				   struct be_fat_conf_params *configs)
3119 {
3120 	struct be_mcc_wrb *wrb;
3121 	struct be_cmd_req_set_ext_fat_caps *req;
3122 	int status;
3123 
3124 	spin_lock_bh(&adapter->mcc_lock);
3125 
3126 	wrb = wrb_from_mccq(adapter);
3127 	if (!wrb) {
3128 		status = -EBUSY;
3129 		goto err;
3130 	}
3131 
3132 	req = cmd->va;
3133 	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3134 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3135 			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3136 			       cmd->size, wrb, cmd);
3137 
3138 	status = be_mcc_notify_wait(adapter);
3139 err:
3140 	spin_unlock_bh(&adapter->mcc_lock);
3141 	return status;
3142 }
3143 
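/* Uses sync mcc (Lancer only; BEx derives the name from hba_port_num) */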
3144 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
3145 {
3146 	struct be_mcc_wrb *wrb;
3147 	struct be_cmd_req_get_port_name *req;
3148 	int status;
3149 
3150 	if (!lancer_chip(adapter)) {
3151 		*port_name = adapter->hba_port_num + '0';
3152 		return 0;
3153 	}
3154 
3155 	spin_lock_bh(&adapter->mcc_lock);
3156 
3157 	wrb = wrb_from_mccq(adapter);
3158 	if (!wrb) {
3159 		status = -EBUSY;
3160 		goto err;
3161 	}
3162 
3163 	req = embedded_payload(wrb);
3164 
3165 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3166 			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3167 			       NULL);
3168 	req->hdr.version = 1;
3169 
3170 	status = be_mcc_notify_wait(adapter);
3171 	if (!status) {
3172 		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3173 		*port_name = resp->port_name[adapter->hba_port_num];
3174 	} else {
3175 		*port_name = adapter->hba_port_num + '0';
3176 	}
3177 err:
3178 	spin_unlock_bh(&adapter->mcc_lock);
3179 	return status;
3180 }
3181 
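/* Walks the resource descriptor list to find the first NIC descriptor */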
3182 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
3183 {
3184 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3185 	int i;
3186 
3187 	for (i = 0; i < desc_count; i++) {
3188 		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3189 		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
3190 			return (struct be_nic_res_desc *)hdr;
3191 
3192 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3193 		hdr = (void *)hdr + hdr->desc_len;
3194 	}
3195 	return NULL;
3196 }
3197 
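/* Walks the resource descriptor list to find the PCIe descriptor that
 * matches the given devfn
 */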
3198 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3199 						 u32 desc_count)
3200 {
3201 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3202 	struct be_pcie_res_desc *pcie;
3203 	int i;
3204 
3205 	for (i = 0; i < desc_count; i++) {
3206 		if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3207 		     hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3208 			pcie = (struct be_pcie_res_desc	*)hdr;
3209 			if (pcie->pf_num == devfn)
3210 				return pcie;
3211 		}
3212 
3213 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3214 		hdr = (void *)hdr + hdr->desc_len;
3215 	}
3216 	return NULL;
3217 }
3218 
3219 static void be_copy_nic_desc(struct be_resources *res,
3220 			     struct be_nic_res_desc *desc)
3221 {
3222 	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3223 	res->max_vlans = le16_to_cpu(desc->vlan_count);
3224 	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3225 	res->max_tx_qs = le16_to_cpu(desc->txq_count);
3226 	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3227 	res->max_rx_qs = le16_to_cpu(desc->rq_count);
3228 	res->max_evt_qs = le16_to_cpu(desc->eq_count);
3229 	/* Clear flags that driver is not interested in */
3230 	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3231 				BE_IF_CAP_FLAGS_WANT;
3232 	/* Need 1 RXQ as the default RXQ */
3233 	if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3234 		res->max_rss_qs -= 1;
3235 }
3236 
3237 /* Uses Mbox */
3238 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3239 {
3240 	struct be_mcc_wrb *wrb;
3241 	struct be_cmd_req_get_func_config *req;
3242 	int status;
3243 	struct be_dma_mem cmd;
3244 
3245 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3246 		return -1;
3247 
3248 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3249 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3250 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3251 				      &cmd.dma);
3252 	if (!cmd.va) {
3253 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3254 		status = -ENOMEM;
3255 		goto err;
3256 	}
3257 
3258 	wrb = wrb_from_mbox(adapter);
3259 	if (!wrb) {
3260 		status = -EBUSY;
3261 		goto err;
3262 	}
3263 
3264 	req = cmd.va;
3265 
3266 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3267 			       OPCODE_COMMON_GET_FUNC_CONFIG,
3268 			       cmd.size, wrb, &cmd);
3269 
3270 	if (skyhawk_chip(adapter))
3271 		req->hdr.version = 1;
3272 
3273 	status = be_mbox_notify_wait(adapter);
3274 	if (!status) {
3275 		struct be_cmd_resp_get_func_config *resp = cmd.va;
3276 		u32 desc_count = le32_to_cpu(resp->desc_count);
3277 		struct be_nic_res_desc *desc;
3278 
3279 		desc = be_get_nic_desc(resp->func_param, desc_count);
3280 		if (!desc) {
3281 			status = -EINVAL;
3282 			goto err;
3283 		}
3284 
3285 		adapter->pf_number = desc->pf_num;
3286 		be_copy_nic_desc(res, desc);
3287 	}
3288 err:
3289 	mutex_unlock(&adapter->mbox_lock);
3290 	if (cmd.va)
3291 		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3292 	return status;
3293 }
3294 
3295 /* Uses mbox */
3296 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3297 					u8 domain, struct be_dma_mem *cmd)
3298 {
3299 	struct be_mcc_wrb *wrb;
3300 	struct be_cmd_req_get_profile_config *req;
3301 	int status;
3302 
3303 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3304 		return -1;
3305 	wrb = wrb_from_mbox(adapter);
3306 
3307 	req = cmd->va;
3308 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3309 			       OPCODE_COMMON_GET_PROFILE_CONFIG,
3310 			       cmd->size, wrb, cmd);
3311 
3312 	req->type = ACTIVE_PROFILE_TYPE;
3313 	req->hdr.domain = domain;
3314 	if (!lancer_chip(adapter))
3315 		req->hdr.version = 1;
3316 
3317 	status = be_mbox_notify_wait(adapter);
3318 
3319 	mutex_unlock(&adapter->mbox_lock);
3320 	return status;
3321 }
3322 
3323 /* Uses sync mcc */
3324 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3325 					u8 domain, struct be_dma_mem *cmd)
3326 {
3327 	struct be_mcc_wrb *wrb;
3328 	struct be_cmd_req_get_profile_config *req;
3329 	int status;
3330 
3331 	spin_lock_bh(&adapter->mcc_lock);
3332 
3333 	wrb = wrb_from_mccq(adapter);
3334 	if (!wrb) {
3335 		status = -EBUSY;
3336 		goto err;
3337 	}
3338 
3339 	req = cmd->va;
3340 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3341 			       OPCODE_COMMON_GET_PROFILE_CONFIG,
3342 			       cmd->size, wrb, cmd);
3343 
3344 	req->type = ACTIVE_PROFILE_TYPE;
3345 	req->hdr.domain = domain;
3346 	if (!lancer_chip(adapter))
3347 		req->hdr.version = 1;
3348 
3349 	status = be_mcc_notify_wait(adapter);
3350 
3351 err:
3352 	spin_unlock_bh(&adapter->mcc_lock);
3353 	return status;
3354 }
3355 
3356 /* Uses sync mcc if the MCCQ has already been created; otherwise uses mbox */
3357 int be_cmd_get_profile_config(struct be_adapter *adapter,
3358 			      struct be_resources *res, u8 domain)
3359 {
3360 	struct be_cmd_resp_get_profile_config *resp;
3361 	struct be_pcie_res_desc *pcie;
3362 	struct be_nic_res_desc *nic;
3363 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
3364 	struct be_dma_mem cmd;
3365 	u32 desc_count;
3366 	int status;
3367 
3368 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3369 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3370 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3371 	if (!cmd.va)
3372 		return -ENOMEM;
3373 
3374 	if (!mccq->created)
3375 		status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3376 	else
3377 		status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
3378 	if (status)
3379 		goto err;
3380 
3381 	resp = cmd.va;
3382 	desc_count = le32_to_cpu(resp->desc_count);
3383 
3384 	pcie =  be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3385 				 desc_count);
3386 	if (pcie)
3387 		res->max_vfs = le16_to_cpu(pcie->num_vfs);
3388 
3389 	nic = be_get_nic_desc(resp->func_param, desc_count);
3390 	if (nic)
3391 		be_copy_nic_desc(res, nic);
3392 
3393 err:
3394 	if (cmd.va)
3395 		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3396 	return status;
3397 }
3398 
3399 /* Currently only Lancer uses this command, and it supports version 0 only.
3400  * Uses sync mcc.
3401  */
3402 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3403 			      u8 domain)
3404 {
3405 	struct be_mcc_wrb *wrb;
3406 	struct be_cmd_req_set_profile_config *req;
3407 	int status;
3408 
3409 	spin_lock_bh(&adapter->mcc_lock);
3410 
3411 	wrb = wrb_from_mccq(adapter);
3412 	if (!wrb) {
3413 		status = -EBUSY;
3414 		goto err;
3415 	}
3416 
3417 	req = embedded_payload(wrb);
3418 
3419 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3420 			       OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3421 			       wrb, NULL);
3422 	req->hdr.domain = domain;
3423 	req->desc_count = cpu_to_le32(1);
3424 	req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3425 	req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3426 	req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3427 	req->nic_desc.pf_num = adapter->pf_number;
3428 	req->nic_desc.vf_num = domain;
3429 
3430 	/* Mark fields invalid */
3431 	req->nic_desc.unicast_mac_count = 0xFFFF;
3432 	req->nic_desc.mcc_count = 0xFFFF;
3433 	req->nic_desc.vlan_count = 0xFFFF;
3434 	req->nic_desc.mcast_mac_count = 0xFFFF;
3435 	req->nic_desc.txq_count = 0xFFFF;
3436 	req->nic_desc.rq_count = 0xFFFF;
3437 	req->nic_desc.rssq_count = 0xFFFF;
3438 	req->nic_desc.lro_count = 0xFFFF;
3439 	req->nic_desc.cq_count = 0xFFFF;
3440 	req->nic_desc.toe_conn_count = 0xFFFF;
3441 	req->nic_desc.eq_count = 0xFFFF;
3442 	req->nic_desc.link_param = 0xFF;
3443 	req->nic_desc.bw_min = 0xFFFFFFFF;
3444 	req->nic_desc.acpi_params = 0xFF;
3445 	req->nic_desc.wol_param = 0x0F;
3446 
3447 	/* Change BW */
3448 	req->nic_desc.bw_min = cpu_to_le32(bps);
3449 	req->nic_desc.bw_max = cpu_to_le32(bps);
3450 	status = be_mcc_notify_wait(adapter);
3451 err:
3452 	spin_unlock_bh(&adapter->mcc_lock);
3453 	return status;
3454 }
3455 
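/* Uses sync mcc */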
3456 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3457 		     int vf_num)
3458 {
3459 	struct be_mcc_wrb *wrb;
3460 	struct be_cmd_req_get_iface_list *req;
3461 	struct be_cmd_resp_get_iface_list *resp;
3462 	int status;
3463 
3464 	spin_lock_bh(&adapter->mcc_lock);
3465 
3466 	wrb = wrb_from_mccq(adapter);
3467 	if (!wrb) {
3468 		status = -EBUSY;
3469 		goto err;
3470 	}
3471 	req = embedded_payload(wrb);
3472 
3473 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3474 			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3475 			       wrb, NULL);
3476 	req->hdr.domain = vf_num + 1;
3477 
3478 	status = be_mcc_notify_wait(adapter);
3479 	if (!status) {
3480 		resp = (struct be_cmd_resp_get_iface_list *)req;
3481 		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3482 	}
3483 
3484 err:
3485 	spin_unlock_bh(&adapter->mcc_lock);
3486 	return status;
3487 }
3488 
3489 static int lancer_wait_idle(struct be_adapter *adapter)
3490 {
3491 #define SLIPORT_IDLE_TIMEOUT 30
3492 	u32 reg_val;
3493 	int status = 0, i;
3494 
3495 	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3496 		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3497 		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3498 			break;
3499 
3500 		ssleep(1);
3501 	}
3502 
3503 	if (i == SLIPORT_IDLE_TIMEOUT)
3504 		status = -ETIMEDOUT;
3505 
3506 	return status;
3507 }
3508 
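/* Waits for the SLI port to go idle before writing the given mask to the
 * physdev control register
 */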
3509 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3510 {
3511 	int status;
3512 
3513 	status = lancer_wait_idle(adapter);
3514 	if (status)
3515 		return status;
3516 
3517 	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3518 
3519 	return status;
3520 }
3521 
3522 /* Check whether a f/w dump image is present */
3523 bool dump_present(struct be_adapter *adapter)
3524 {
3525 	u32 sliport_status = 0;
3526 
3527 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3528 	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3529 }
3530 
3531 int lancer_initiate_dump(struct be_adapter *adapter)
3532 {
3533 	int status;
3534 
3535 	/* Trigger a firmware reset and a diagnostic dump */
3536 	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3537 				     PHYSDEV_CONTROL_DD_MASK);
3538 	if (status < 0) {
3539 		dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
3540 		return status;
3541 	}
3542 
3543 	status = lancer_wait_idle(adapter);
3544 	if (status)
3545 		return status;
3546 
3547 	if (!dump_present(adapter)) {
3548 		dev_err(&adapter->pdev->dev, "Dump image not present\n");
3549 		return -1;
3550 	}
3551 
3552 	return 0;
3553 }
3554 
3555 /* Uses sync mcc */
3556 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3557 {
3558 	struct be_mcc_wrb *wrb;
3559 	struct be_cmd_enable_disable_vf *req;
3560 	int status;
3561 
3562 	if (BEx_chip(adapter))
3563 		return 0;
3564 
3565 	spin_lock_bh(&adapter->mcc_lock);
3566 
3567 	wrb = wrb_from_mccq(adapter);
3568 	if (!wrb) {
3569 		status = -EBUSY;
3570 		goto err;
3571 	}
3572 
3573 	req = embedded_payload(wrb);
3574 
3575 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3576 			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3577 			       wrb, NULL);
3578 
3579 	req->hdr.domain = domain;
3580 	req->enable = 1;
3581 	status = be_mcc_notify_wait(adapter);
3582 err:
3583 	spin_unlock_bh(&adapter->mcc_lock);
3584 	return status;
3585 }
3586 
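/* Uses mbox */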
3587 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3588 {
3589 	struct be_mcc_wrb *wrb;
3590 	struct be_cmd_req_intr_set *req;
3591 	int status;
3592 
3593 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3594 		return -1;
3595 
3596 	wrb = wrb_from_mbox(adapter);
3597 
3598 	req = embedded_payload(wrb);
3599 
3600 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3601 			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3602 			       wrb, NULL);
3603 
3604 	req->intr_enabled = intr_enable;
3605 
3606 	status = be_mbox_notify_wait(adapter);
3607 
3608 	mutex_unlock(&adapter->mbox_lock);
3609 	return status;
3610 }
3611 
3612 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3613 			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3614 {
3615 	struct be_adapter *adapter = netdev_priv(netdev_handle);
3616 	struct be_mcc_wrb *wrb;
3617 	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3618 	struct be_cmd_req_hdr *req;
3619 	struct be_cmd_resp_hdr *resp;
3620 	int status;
3621 
3622 	spin_lock_bh(&adapter->mcc_lock);
3623 
3624 	wrb = wrb_from_mccq(adapter);
3625 	if (!wrb) {
3626 		status = -EBUSY;
3627 		goto err;
3628 	}
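	/* The request and response share the WRB's embedded payload buffer */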
3629 	req = embedded_payload(wrb);
3630 	resp = embedded_payload(wrb);
3631 
3632 	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3633 			       hdr->opcode, wrb_payload_size, wrb, NULL);
3634 	memcpy(req, wrb_payload, wrb_payload_size);
3635 	be_dws_cpu_to_le(req, wrb_payload_size);
3636 
3637 	status = be_mcc_notify_wait(adapter);
3638 	if (cmd_status)
3639 		*cmd_status = (status & 0xffff);
3640 	if (ext_status)
3641 		*ext_status = 0;
3642 	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3643 	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3644 err:
3645 	spin_unlock_bh(&adapter->mcc_lock);
3646 	return status;
3647 }
3648 EXPORT_SYMBOL(be_roce_mcc_cmd);
3649