1 /*
2  * Copyright (C) 2005 - 2014 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 
22 static char *be_port_misconfig_evt_desc[] = {
23 	"A valid SFP module detected",
24 	"Optics faulted/ incorrectly installed/ not installed.",
25 	"Optics of two types installed.",
26 	"Incompatible optics.",
27 	"Unknown port SFP status"
28 };
29 
30 static char *be_port_misconfig_remedy_desc[] = {
31 	"",
32 	"Reseat optics. If issue not resolved, replace",
33 	"Remove one optic or install matching pair of optics",
34 	"Replace with compatible optics for card to function",
35 	""
36 };
37 
38 static struct be_cmd_priv_map cmd_priv_map[] = {
39 	{
40 		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
41 		CMD_SUBSYSTEM_ETH,
42 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
43 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
44 	},
45 	{
46 		OPCODE_COMMON_GET_FLOW_CONTROL,
47 		CMD_SUBSYSTEM_COMMON,
48 		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
49 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
50 	},
51 	{
52 		OPCODE_COMMON_SET_FLOW_CONTROL,
53 		CMD_SUBSYSTEM_COMMON,
54 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
55 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
56 	},
57 	{
58 		OPCODE_ETH_GET_PPORT_STATS,
59 		CMD_SUBSYSTEM_ETH,
60 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
61 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
62 	},
63 	{
64 		OPCODE_COMMON_GET_PHY_DETAILS,
65 		CMD_SUBSYSTEM_COMMON,
66 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
67 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
68 	}
69 };
70 
71 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
72 {
73 	int i;
74 	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
75 	u32 cmd_privileges = adapter->cmd_privileges;
76 
77 	for (i = 0; i < num_entries; i++)
78 		if (opcode == cmd_priv_map[i].opcode &&
79 		    subsystem == cmd_priv_map[i].subsystem)
80 			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
81 				return false;
82 
83 	return true;
84 }
85 
86 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
87 {
88 	return wrb->payload.embedded_payload;
89 }
90 
91 static void be_mcc_notify(struct be_adapter *adapter)
92 {
93 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
94 	u32 val = 0;
95 
96 	if (be_error(adapter))
97 		return;
98 
99 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
100 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
101 
102 	wmb();
103 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
104 }
105 
106 /* To check if valid bit is set, check the entire word as we don't know
107  * the endianness of the data (old entry is host endian while a new entry is
108  * little endian) */
109 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
110 {
111 	u32 flags;
112 
113 	if (compl->flags != 0) {
114 		flags = le32_to_cpu(compl->flags);
115 		if (flags & CQE_FLAGS_VALID_MASK) {
116 			compl->flags = flags;
117 			return true;
118 		}
119 	}
120 	return false;
121 }
122 
123 /* Need to reset the entire word that houses the valid bit */
124 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
125 {
126 	compl->flags = 0;
127 }
128 
129 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
130 {
131 	unsigned long addr;
132 
133 	addr = tag1;
134 	addr = ((addr << 16) << 16) | tag0;
135 	return (void *)addr;
136 }
137 
138 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
139 {
140 	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
141 	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
142 	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
143 	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
144 	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
145 	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
146 		return true;
147 	else
148 		return false;
149 }
150 
151 /* Place holder for all the async MCC cmds wherein the caller is not in a busy
152  * loop (has not issued be_mcc_notify_wait())
153  */
154 static void be_async_cmd_process(struct be_adapter *adapter,
155 				 struct be_mcc_compl *compl,
156 				 struct be_cmd_resp_hdr *resp_hdr)
157 {
158 	enum mcc_base_status base_status = base_status(compl->status);
159 	u8 opcode = 0, subsystem = 0;
160 
161 	if (resp_hdr) {
162 		opcode = resp_hdr->opcode;
163 		subsystem = resp_hdr->subsystem;
164 	}
165 
166 	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
167 	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
168 		complete(&adapter->et_cmd_compl);
169 		return;
170 	}
171 
172 	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
173 	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
174 	    subsystem == CMD_SUBSYSTEM_COMMON) {
175 		adapter->flash_status = compl->status;
176 		complete(&adapter->et_cmd_compl);
177 		return;
178 	}
179 
180 	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
181 	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
182 	    subsystem == CMD_SUBSYSTEM_ETH &&
183 	    base_status == MCC_STATUS_SUCCESS) {
184 		be_parse_stats(adapter);
185 		adapter->stats_cmd_sent = false;
186 		return;
187 	}
188 
189 	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
190 	    subsystem == CMD_SUBSYSTEM_COMMON) {
191 		if (base_status == MCC_STATUS_SUCCESS) {
192 			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
193 							(void *)resp_hdr;
194 			adapter->drv_stats.be_on_die_temperature =
195 						resp->on_die_temperature;
196 		} else {
197 			adapter->be_get_temp_freq = 0;
198 		}
199 		return;
200 	}
201 }
202 
203 static int be_mcc_compl_process(struct be_adapter *adapter,
204 				struct be_mcc_compl *compl)
205 {
206 	enum mcc_base_status base_status;
207 	enum mcc_addl_status addl_status;
208 	struct be_cmd_resp_hdr *resp_hdr;
209 	u8 opcode = 0, subsystem = 0;
210 
211 	/* Just swap the status to host endian; mcc tag is opaquely copied
212 	 * from mcc_wrb */
213 	be_dws_le_to_cpu(compl, 4);
214 
215 	base_status = base_status(compl->status);
216 	addl_status = addl_status(compl->status);
217 
218 	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
219 	if (resp_hdr) {
220 		opcode = resp_hdr->opcode;
221 		subsystem = resp_hdr->subsystem;
222 	}
223 
224 	be_async_cmd_process(adapter, compl, resp_hdr);
225 
226 	if (base_status != MCC_STATUS_SUCCESS &&
227 	    !be_skip_err_log(opcode, base_status, addl_status)) {
228 		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
229 			dev_warn(&adapter->pdev->dev,
230 				 "VF is not privileged to issue opcode %d-%d\n",
231 				 opcode, subsystem);
232 		} else {
233 			dev_err(&adapter->pdev->dev,
234 				"opcode %d-%d failed:status %d-%d\n",
235 				opcode, subsystem, base_status, addl_status);
236 		}
237 	}
238 	return compl->status;
239 }
240 
241 /* Link state evt is a string of bytes; no need for endian swapping */
242 static void be_async_link_state_process(struct be_adapter *adapter,
243 					struct be_mcc_compl *compl)
244 {
245 	struct be_async_event_link_state *evt =
246 			(struct be_async_event_link_state *)compl;
247 
248 	/* When link status changes, link speed must be re-queried from FW */
249 	adapter->phy.link_speed = -1;
250 
251 	/* On BEx the FW does not send a separate link status
252 	 * notification for physical and logical link.
253 	 * On other chips just process the logical link
254 	 * status notification
255 	 */
256 	if (!BEx_chip(adapter) &&
257 	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
258 		return;
259 
260 	/* For the initial link status do not rely on the ASYNC event as
261 	 * it may not be received in some cases.
262 	 */
263 	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
264 		be_link_status_update(adapter,
265 				      evt->port_link_status & LINK_STATUS_MASK);
266 }
267 
268 static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
269 						  struct be_mcc_compl *compl)
270 {
271 	struct be_async_event_misconfig_port *evt =
272 			(struct be_async_event_misconfig_port *)compl;
273 	u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1);
274 	struct device *dev = &adapter->pdev->dev;
275 	u8 port_misconfig_evt;
276 
277 	port_misconfig_evt =
278 		((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff);
279 
280 	/* Log an error message that would allow a user to determine
281 	 * whether the SFPs have an issue
282 	 */
283 	dev_info(dev, "Port %c: %s %s", adapter->port_name,
284 		 be_port_misconfig_evt_desc[port_misconfig_evt],
285 		 be_port_misconfig_remedy_desc[port_misconfig_evt]);
286 
287 	if (port_misconfig_evt == INCOMPATIBLE_SFP)
288 		adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP;
289 }
290 
291 /* Grp5 CoS Priority evt */
292 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
293 					       struct be_mcc_compl *compl)
294 {
295 	struct be_async_event_grp5_cos_priority *evt =
296 			(struct be_async_event_grp5_cos_priority *)compl;
297 
298 	if (evt->valid) {
299 		adapter->vlan_prio_bmap = evt->available_priority_bmap;
300 		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
301 		adapter->recommended_prio =
302 			evt->reco_default_priority << VLAN_PRIO_SHIFT;
303 	}
304 }
305 
306 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
307 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
308 					    struct be_mcc_compl *compl)
309 {
310 	struct be_async_event_grp5_qos_link_speed *evt =
311 			(struct be_async_event_grp5_qos_link_speed *)compl;
312 
313 	if (adapter->phy.link_speed >= 0 &&
314 	    evt->physical_port == adapter->port_num)
315 		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
316 }
317 
318 /*Grp5 PVID evt*/
319 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
320 					     struct be_mcc_compl *compl)
321 {
322 	struct be_async_event_grp5_pvid_state *evt =
323 			(struct be_async_event_grp5_pvid_state *)compl;
324 
325 	if (evt->enabled) {
326 		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
327 		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
328 	} else {
329 		adapter->pvid = 0;
330 	}
331 }
332 
333 static void be_async_grp5_evt_process(struct be_adapter *adapter,
334 				      struct be_mcc_compl *compl)
335 {
336 	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
337 				ASYNC_EVENT_TYPE_MASK;
338 
339 	switch (event_type) {
340 	case ASYNC_EVENT_COS_PRIORITY:
341 		be_async_grp5_cos_priority_process(adapter, compl);
342 		break;
343 	case ASYNC_EVENT_QOS_SPEED:
344 		be_async_grp5_qos_speed_process(adapter, compl);
345 		break;
346 	case ASYNC_EVENT_PVID_STATE:
347 		be_async_grp5_pvid_state_process(adapter, compl);
348 		break;
349 	default:
350 		break;
351 	}
352 }
353 
354 static void be_async_dbg_evt_process(struct be_adapter *adapter,
355 				     struct be_mcc_compl *cmp)
356 {
357 	u8 event_type = 0;
358 	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
359 
360 	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
361 			ASYNC_EVENT_TYPE_MASK;
362 
363 	switch (event_type) {
364 	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
365 		if (evt->valid)
366 			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
367 		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
368 	break;
369 	default:
370 		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
371 			 event_type);
372 	break;
373 	}
374 }
375 
376 static void be_async_sliport_evt_process(struct be_adapter *adapter,
377 					 struct be_mcc_compl *cmp)
378 {
379 	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
380 			ASYNC_EVENT_TYPE_MASK;
381 
382 	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
383 		be_async_port_misconfig_event_process(adapter, cmp);
384 }
385 
386 static inline bool is_link_state_evt(u32 flags)
387 {
388 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
389 			ASYNC_EVENT_CODE_LINK_STATE;
390 }
391 
392 static inline bool is_grp5_evt(u32 flags)
393 {
394 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
395 			ASYNC_EVENT_CODE_GRP_5;
396 }
397 
398 static inline bool is_dbg_evt(u32 flags)
399 {
400 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
401 			ASYNC_EVENT_CODE_QNQ;
402 }
403 
404 static inline bool is_sliport_evt(u32 flags)
405 {
406 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
407 		ASYNC_EVENT_CODE_SLIPORT;
408 }
409 
410 static void be_mcc_event_process(struct be_adapter *adapter,
411 				 struct be_mcc_compl *compl)
412 {
413 	if (is_link_state_evt(compl->flags))
414 		be_async_link_state_process(adapter, compl);
415 	else if (is_grp5_evt(compl->flags))
416 		be_async_grp5_evt_process(adapter, compl);
417 	else if (is_dbg_evt(compl->flags))
418 		be_async_dbg_evt_process(adapter, compl);
419 	else if (is_sliport_evt(compl->flags))
420 		be_async_sliport_evt_process(adapter, compl);
421 }
422 
423 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
424 {
425 	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
426 	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
427 
428 	if (be_mcc_compl_is_new(compl)) {
429 		queue_tail_inc(mcc_cq);
430 		return compl;
431 	}
432 	return NULL;
433 }
434 
435 void be_async_mcc_enable(struct be_adapter *adapter)
436 {
437 	spin_lock_bh(&adapter->mcc_cq_lock);
438 
439 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
440 	adapter->mcc_obj.rearm_cq = true;
441 
442 	spin_unlock_bh(&adapter->mcc_cq_lock);
443 }
444 
445 void be_async_mcc_disable(struct be_adapter *adapter)
446 {
447 	spin_lock_bh(&adapter->mcc_cq_lock);
448 
449 	adapter->mcc_obj.rearm_cq = false;
450 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
451 
452 	spin_unlock_bh(&adapter->mcc_cq_lock);
453 }
454 
455 int be_process_mcc(struct be_adapter *adapter)
456 {
457 	struct be_mcc_compl *compl;
458 	int num = 0, status = 0;
459 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
460 
461 	spin_lock(&adapter->mcc_cq_lock);
462 
463 	while ((compl = be_mcc_compl_get(adapter))) {
464 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
465 			be_mcc_event_process(adapter, compl);
466 		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
467 			status = be_mcc_compl_process(adapter, compl);
468 			atomic_dec(&mcc_obj->q.used);
469 		}
470 		be_mcc_compl_use(compl);
471 		num++;
472 	}
473 
474 	if (num)
475 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
476 
477 	spin_unlock(&adapter->mcc_cq_lock);
478 	return status;
479 }
480 
481 /* Wait till no more pending mcc requests are present */
482 static int be_mcc_wait_compl(struct be_adapter *adapter)
483 {
484 #define mcc_timeout		120000 /* 12s timeout */
485 	int i, status = 0;
486 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
487 
488 	for (i = 0; i < mcc_timeout; i++) {
489 		if (be_error(adapter))
490 			return -EIO;
491 
492 		local_bh_disable();
493 		status = be_process_mcc(adapter);
494 		local_bh_enable();
495 
496 		if (atomic_read(&mcc_obj->q.used) == 0)
497 			break;
498 		udelay(100);
499 	}
500 	if (i == mcc_timeout) {
501 		dev_err(&adapter->pdev->dev, "FW not responding\n");
502 		adapter->fw_timeout = true;
503 		return -EIO;
504 	}
505 	return status;
506 }
507 
508 /* Notify MCC requests and wait for completion */
509 static int be_mcc_notify_wait(struct be_adapter *adapter)
510 {
511 	int status;
512 	struct be_mcc_wrb *wrb;
513 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
514 	u16 index = mcc_obj->q.head;
515 	struct be_cmd_resp_hdr *resp;
516 
517 	index_dec(&index, mcc_obj->q.len);
518 	wrb = queue_index_node(&mcc_obj->q, index);
519 
520 	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
521 
522 	be_mcc_notify(adapter);
523 
524 	status = be_mcc_wait_compl(adapter);
525 	if (status == -EIO)
526 		goto out;
527 
528 	status = (resp->base_status |
529 		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
530 		   CQE_ADDL_STATUS_SHIFT));
531 out:
532 	return status;
533 }
534 
535 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
536 {
537 	int msecs = 0;
538 	u32 ready;
539 
540 	do {
541 		if (be_error(adapter))
542 			return -EIO;
543 
544 		ready = ioread32(db);
545 		if (ready == 0xffffffff)
546 			return -1;
547 
548 		ready &= MPU_MAILBOX_DB_RDY_MASK;
549 		if (ready)
550 			break;
551 
552 		if (msecs > 4000) {
553 			dev_err(&adapter->pdev->dev, "FW not responding\n");
554 			adapter->fw_timeout = true;
555 			be_detect_error(adapter);
556 			return -1;
557 		}
558 
559 		msleep(1);
560 		msecs++;
561 	} while (true);
562 
563 	return 0;
564 }
565 
566 /*
567  * Insert the mailbox address into the doorbell in two steps
568  * Polls on the mbox doorbell till a command completion (or a timeout) occurs
569  */
570 static int be_mbox_notify_wait(struct be_adapter *adapter)
571 {
572 	int status;
573 	u32 val = 0;
574 	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
575 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
576 	struct be_mcc_mailbox *mbox = mbox_mem->va;
577 	struct be_mcc_compl *compl = &mbox->compl;
578 
579 	/* wait for ready to be set */
580 	status = be_mbox_db_ready_wait(adapter, db);
581 	if (status != 0)
582 		return status;
583 
584 	val |= MPU_MAILBOX_DB_HI_MASK;
585 	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
586 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
587 	iowrite32(val, db);
588 
589 	/* wait for ready to be set */
590 	status = be_mbox_db_ready_wait(adapter, db);
591 	if (status != 0)
592 		return status;
593 
594 	val = 0;
595 	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
596 	val |= (u32)(mbox_mem->dma >> 4) << 2;
597 	iowrite32(val, db);
598 
599 	status = be_mbox_db_ready_wait(adapter, db);
600 	if (status != 0)
601 		return status;
602 
603 	/* A cq entry has been made now */
604 	if (be_mcc_compl_is_new(compl)) {
605 		status = be_mcc_compl_process(adapter, &mbox->compl);
606 		be_mcc_compl_use(compl);
607 		if (status)
608 			return status;
609 	} else {
610 		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
611 		return -1;
612 	}
613 	return 0;
614 }
615 
616 static u16 be_POST_stage_get(struct be_adapter *adapter)
617 {
618 	u32 sem;
619 
620 	if (BEx_chip(adapter))
621 		sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
622 	else
623 		pci_read_config_dword(adapter->pdev,
624 				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
625 
626 	return sem & POST_STAGE_MASK;
627 }
628 
629 static int lancer_wait_ready(struct be_adapter *adapter)
630 {
631 #define SLIPORT_READY_TIMEOUT 30
632 	u32 sliport_status;
633 	int i;
634 
635 	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
636 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
637 		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
638 			break;
639 
640 		msleep(1000);
641 	}
642 
643 	if (i == SLIPORT_READY_TIMEOUT)
644 		return sliport_status ? : -1;
645 
646 	return 0;
647 }
648 
649 static bool lancer_provisioning_error(struct be_adapter *adapter)
650 {
651 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
652 
653 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
654 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
655 		sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
656 		sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
657 
658 		if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
659 		    sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
660 			return true;
661 	}
662 	return false;
663 }
664 
665 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
666 {
667 	int status;
668 	u32 sliport_status, err, reset_needed;
669 	bool resource_error;
670 
671 	resource_error = lancer_provisioning_error(adapter);
672 	if (resource_error)
673 		return -EAGAIN;
674 
675 	status = lancer_wait_ready(adapter);
676 	if (!status) {
677 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
678 		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
679 		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
680 		if (err && reset_needed) {
681 			iowrite32(SLI_PORT_CONTROL_IP_MASK,
682 				  adapter->db + SLIPORT_CONTROL_OFFSET);
683 
684 			/* check if adapter has corrected the error */
685 			status = lancer_wait_ready(adapter);
686 			sliport_status = ioread32(adapter->db +
687 						  SLIPORT_STATUS_OFFSET);
688 			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
689 						SLIPORT_STATUS_RN_MASK);
690 			if (status || sliport_status)
691 				status = -1;
692 		} else if (err || reset_needed) {
693 			status = -1;
694 		}
695 	}
696 	/* Stop error recovery if error is not recoverable.
697 	 * No resource error is temporary errors and will go away
698 	 * when PF provisions resources.
699 	 */
700 	resource_error = lancer_provisioning_error(adapter);
701 	if (resource_error)
702 		status = -EAGAIN;
703 
704 	return status;
705 }
706 
707 int be_fw_wait_ready(struct be_adapter *adapter)
708 {
709 	u16 stage;
710 	int status, timeout = 0;
711 	struct device *dev = &adapter->pdev->dev;
712 
713 	if (lancer_chip(adapter)) {
714 		status = lancer_wait_ready(adapter);
715 		if (status) {
716 			stage = status;
717 			goto err;
718 		}
719 		return 0;
720 	}
721 
722 	do {
723 		stage = be_POST_stage_get(adapter);
724 		if (stage == POST_STAGE_ARMFW_RDY)
725 			return 0;
726 
727 		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
728 		if (msleep_interruptible(2000)) {
729 			dev_err(dev, "Waiting for POST aborted\n");
730 			return -EINTR;
731 		}
732 		timeout += 2;
733 	} while (timeout < 60);
734 
735 err:
736 	dev_err(dev, "POST timeout; stage=%#x\n", stage);
737 	return -1;
738 }
739 
740 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
741 {
742 	return &wrb->payload.sgl[0];
743 }
744 
745 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
746 {
747 	wrb->tag0 = addr & 0xFFFFFFFF;
748 	wrb->tag1 = upper_32_bits(addr);
749 }
750 
751 /* Don't touch the hdr after it's prepared */
752 /* mem will be NULL for embedded commands */
753 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
754 				   u8 subsystem, u8 opcode, int cmd_len,
755 				   struct be_mcc_wrb *wrb,
756 				   struct be_dma_mem *mem)
757 {
758 	struct be_sge *sge;
759 
760 	req_hdr->opcode = opcode;
761 	req_hdr->subsystem = subsystem;
762 	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
763 	req_hdr->version = 0;
764 	fill_wrb_tags(wrb, (ulong) req_hdr);
765 	wrb->payload_length = cmd_len;
766 	if (mem) {
767 		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
768 			MCC_WRB_SGE_CNT_SHIFT;
769 		sge = nonembedded_sgl(wrb);
770 		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
771 		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
772 		sge->len = cpu_to_le32(mem->size);
773 	} else
774 		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
775 	be_dws_cpu_to_le(wrb, 8);
776 }
777 
778 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
779 				      struct be_dma_mem *mem)
780 {
781 	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
782 	u64 dma = (u64)mem->dma;
783 
784 	for (i = 0; i < buf_pages; i++) {
785 		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
786 		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
787 		dma += PAGE_SIZE_4K;
788 	}
789 }
790 
791 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
792 {
793 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
794 	struct be_mcc_wrb *wrb
795 		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
796 	memset(wrb, 0, sizeof(*wrb));
797 	return wrb;
798 }
799 
800 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
801 {
802 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
803 	struct be_mcc_wrb *wrb;
804 
805 	if (!mccq->created)
806 		return NULL;
807 
808 	if (atomic_read(&mccq->used) >= mccq->len)
809 		return NULL;
810 
811 	wrb = queue_head_node(mccq);
812 	queue_head_inc(mccq);
813 	atomic_inc(&mccq->used);
814 	memset(wrb, 0, sizeof(*wrb));
815 	return wrb;
816 }
817 
818 static bool use_mcc(struct be_adapter *adapter)
819 {
820 	return adapter->mcc_obj.q.created;
821 }
822 
823 /* Must be used only in process context */
824 static int be_cmd_lock(struct be_adapter *adapter)
825 {
826 	if (use_mcc(adapter)) {
827 		spin_lock_bh(&adapter->mcc_lock);
828 		return 0;
829 	} else {
830 		return mutex_lock_interruptible(&adapter->mbox_lock);
831 	}
832 }
833 
834 /* Must be used only in process context */
835 static void be_cmd_unlock(struct be_adapter *adapter)
836 {
837 	if (use_mcc(adapter))
838 		spin_unlock_bh(&adapter->mcc_lock);
839 	else
840 		return mutex_unlock(&adapter->mbox_lock);
841 }
842 
843 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
844 				      struct be_mcc_wrb *wrb)
845 {
846 	struct be_mcc_wrb *dest_wrb;
847 
848 	if (use_mcc(adapter)) {
849 		dest_wrb = wrb_from_mccq(adapter);
850 		if (!dest_wrb)
851 			return NULL;
852 	} else {
853 		dest_wrb = wrb_from_mbox(adapter);
854 	}
855 
856 	memcpy(dest_wrb, wrb, sizeof(*wrb));
857 	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
858 		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
859 
860 	return dest_wrb;
861 }
862 
863 /* Must be used only in process context */
864 static int be_cmd_notify_wait(struct be_adapter *adapter,
865 			      struct be_mcc_wrb *wrb)
866 {
867 	struct be_mcc_wrb *dest_wrb;
868 	int status;
869 
870 	status = be_cmd_lock(adapter);
871 	if (status)
872 		return status;
873 
874 	dest_wrb = be_cmd_copy(adapter, wrb);
875 	if (!dest_wrb)
876 		return -EBUSY;
877 
878 	if (use_mcc(adapter))
879 		status = be_mcc_notify_wait(adapter);
880 	else
881 		status = be_mbox_notify_wait(adapter);
882 
883 	if (!status)
884 		memcpy(wrb, dest_wrb, sizeof(*wrb));
885 
886 	be_cmd_unlock(adapter);
887 	return status;
888 }
889 
890 /* Tell fw we're about to start firing cmds by writing a
891  * special pattern across the wrb hdr; uses mbox
892  */
893 int be_cmd_fw_init(struct be_adapter *adapter)
894 {
895 	u8 *wrb;
896 	int status;
897 
898 	if (lancer_chip(adapter))
899 		return 0;
900 
901 	if (mutex_lock_interruptible(&adapter->mbox_lock))
902 		return -1;
903 
904 	wrb = (u8 *)wrb_from_mbox(adapter);
905 	*wrb++ = 0xFF;
906 	*wrb++ = 0x12;
907 	*wrb++ = 0x34;
908 	*wrb++ = 0xFF;
909 	*wrb++ = 0xFF;
910 	*wrb++ = 0x56;
911 	*wrb++ = 0x78;
912 	*wrb = 0xFF;
913 
914 	status = be_mbox_notify_wait(adapter);
915 
916 	mutex_unlock(&adapter->mbox_lock);
917 	return status;
918 }
919 
920 /* Tell fw we're done with firing cmds by writing a
921  * special pattern across the wrb hdr; uses mbox
922  */
923 int be_cmd_fw_clean(struct be_adapter *adapter)
924 {
925 	u8 *wrb;
926 	int status;
927 
928 	if (lancer_chip(adapter))
929 		return 0;
930 
931 	if (mutex_lock_interruptible(&adapter->mbox_lock))
932 		return -1;
933 
934 	wrb = (u8 *)wrb_from_mbox(adapter);
935 	*wrb++ = 0xFF;
936 	*wrb++ = 0xAA;
937 	*wrb++ = 0xBB;
938 	*wrb++ = 0xFF;
939 	*wrb++ = 0xFF;
940 	*wrb++ = 0xCC;
941 	*wrb++ = 0xDD;
942 	*wrb = 0xFF;
943 
944 	status = be_mbox_notify_wait(adapter);
945 
946 	mutex_unlock(&adapter->mbox_lock);
947 	return status;
948 }
949 
950 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
951 {
952 	struct be_mcc_wrb *wrb;
953 	struct be_cmd_req_eq_create *req;
954 	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
955 	int status, ver = 0;
956 
957 	if (mutex_lock_interruptible(&adapter->mbox_lock))
958 		return -1;
959 
960 	wrb = wrb_from_mbox(adapter);
961 	req = embedded_payload(wrb);
962 
963 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
964 			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
965 			       NULL);
966 
967 	/* Support for EQ_CREATEv2 available only SH-R onwards */
968 	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
969 		ver = 2;
970 
971 	req->hdr.version = ver;
972 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
973 
974 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
975 	/* 4byte eqe*/
976 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
977 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
978 		      __ilog2_u32(eqo->q.len / 256));
979 	be_dws_cpu_to_le(req->context, sizeof(req->context));
980 
981 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
982 
983 	status = be_mbox_notify_wait(adapter);
984 	if (!status) {
985 		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
986 
987 		eqo->q.id = le16_to_cpu(resp->eq_id);
988 		eqo->msix_idx =
989 			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
990 		eqo->q.created = true;
991 	}
992 
993 	mutex_unlock(&adapter->mbox_lock);
994 	return status;
995 }
996 
997 /* Use MCC */
998 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
999 			  bool permanent, u32 if_handle, u32 pmac_id)
1000 {
1001 	struct be_mcc_wrb *wrb;
1002 	struct be_cmd_req_mac_query *req;
1003 	int status;
1004 
1005 	spin_lock_bh(&adapter->mcc_lock);
1006 
1007 	wrb = wrb_from_mccq(adapter);
1008 	if (!wrb) {
1009 		status = -EBUSY;
1010 		goto err;
1011 	}
1012 	req = embedded_payload(wrb);
1013 
1014 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1015 			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
1016 			       NULL);
1017 	req->type = MAC_ADDRESS_TYPE_NETWORK;
1018 	if (permanent) {
1019 		req->permanent = 1;
1020 	} else {
1021 		req->if_id = cpu_to_le16((u16)if_handle);
1022 		req->pmac_id = cpu_to_le32(pmac_id);
1023 		req->permanent = 0;
1024 	}
1025 
1026 	status = be_mcc_notify_wait(adapter);
1027 	if (!status) {
1028 		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
1029 
1030 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
1031 	}
1032 
1033 err:
1034 	spin_unlock_bh(&adapter->mcc_lock);
1035 	return status;
1036 }
1037 
1038 /* Uses synchronous MCCQ */
1039 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1040 		    u32 if_id, u32 *pmac_id, u32 domain)
1041 {
1042 	struct be_mcc_wrb *wrb;
1043 	struct be_cmd_req_pmac_add *req;
1044 	int status;
1045 
1046 	spin_lock_bh(&adapter->mcc_lock);
1047 
1048 	wrb = wrb_from_mccq(adapter);
1049 	if (!wrb) {
1050 		status = -EBUSY;
1051 		goto err;
1052 	}
1053 	req = embedded_payload(wrb);
1054 
1055 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1056 			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
1057 			       NULL);
1058 
1059 	req->hdr.domain = domain;
1060 	req->if_id = cpu_to_le32(if_id);
1061 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
1062 
1063 	status = be_mcc_notify_wait(adapter);
1064 	if (!status) {
1065 		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1066 
1067 		*pmac_id = le32_to_cpu(resp->pmac_id);
1068 	}
1069 
1070 err:
1071 	spin_unlock_bh(&adapter->mcc_lock);
1072 
1073 	 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1074 		status = -EPERM;
1075 
1076 	return status;
1077 }
1078 
1079 /* Uses synchronous MCCQ */
1080 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1081 {
1082 	struct be_mcc_wrb *wrb;
1083 	struct be_cmd_req_pmac_del *req;
1084 	int status;
1085 
1086 	if (pmac_id == -1)
1087 		return 0;
1088 
1089 	spin_lock_bh(&adapter->mcc_lock);
1090 
1091 	wrb = wrb_from_mccq(adapter);
1092 	if (!wrb) {
1093 		status = -EBUSY;
1094 		goto err;
1095 	}
1096 	req = embedded_payload(wrb);
1097 
1098 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1099 			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
1100 			       wrb, NULL);
1101 
1102 	req->hdr.domain = dom;
1103 	req->if_id = cpu_to_le32(if_id);
1104 	req->pmac_id = cpu_to_le32(pmac_id);
1105 
1106 	status = be_mcc_notify_wait(adapter);
1107 
1108 err:
1109 	spin_unlock_bh(&adapter->mcc_lock);
1110 	return status;
1111 }
1112 
1113 /* Uses Mbox */
1114 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1115 		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1116 {
1117 	struct be_mcc_wrb *wrb;
1118 	struct be_cmd_req_cq_create *req;
1119 	struct be_dma_mem *q_mem = &cq->dma_mem;
1120 	void *ctxt;
1121 	int status;
1122 
1123 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1124 		return -1;
1125 
1126 	wrb = wrb_from_mbox(adapter);
1127 	req = embedded_payload(wrb);
1128 	ctxt = &req->context;
1129 
1130 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1131 			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1132 			       NULL);
1133 
1134 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1135 
1136 	if (BEx_chip(adapter)) {
1137 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1138 			      coalesce_wm);
1139 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1140 			      ctxt, no_delay);
1141 		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1142 			      __ilog2_u32(cq->len / 256));
1143 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1144 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1145 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1146 	} else {
1147 		req->hdr.version = 2;
1148 		req->page_size = 1; /* 1 for 4K */
1149 
1150 		/* coalesce-wm field in this cmd is not relevant to Lancer.
1151 		 * Lancer uses COMMON_MODIFY_CQ to set this field
1152 		 */
1153 		if (!lancer_chip(adapter))
1154 			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1155 				      ctxt, coalesce_wm);
1156 		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1157 			      no_delay);
1158 		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1159 			      __ilog2_u32(cq->len / 256));
1160 		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1161 		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1162 		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1163 	}
1164 
1165 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1166 
1167 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1168 
1169 	status = be_mbox_notify_wait(adapter);
1170 	if (!status) {
1171 		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1172 
1173 		cq->id = le16_to_cpu(resp->cq_id);
1174 		cq->created = true;
1175 	}
1176 
1177 	mutex_unlock(&adapter->mbox_lock);
1178 
1179 	return status;
1180 }
1181 
1182 static u32 be_encoded_q_len(int q_len)
1183 {
1184 	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1185 
1186 	if (len_encoded == 16)
1187 		len_encoded = 0;
1188 	return len_encoded;
1189 }
1190 
1191 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1192 				  struct be_queue_info *mccq,
1193 				  struct be_queue_info *cq)
1194 {
1195 	struct be_mcc_wrb *wrb;
1196 	struct be_cmd_req_mcc_ext_create *req;
1197 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1198 	void *ctxt;
1199 	int status;
1200 
1201 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1202 		return -1;
1203 
1204 	wrb = wrb_from_mbox(adapter);
1205 	req = embedded_payload(wrb);
1206 	ctxt = &req->context;
1207 
1208 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1209 			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1210 			       NULL);
1211 
1212 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1213 	if (BEx_chip(adapter)) {
1214 		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1215 		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1216 			      be_encoded_q_len(mccq->len));
1217 		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1218 	} else {
1219 		req->hdr.version = 1;
1220 		req->cq_id = cpu_to_le16(cq->id);
1221 
1222 		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1223 			      be_encoded_q_len(mccq->len));
1224 		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1225 		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1226 			      ctxt, cq->id);
1227 		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1228 			      ctxt, 1);
1229 	}
1230 
1231 	/* Subscribe to Link State, Sliport Event and Group 5 Events
1232 	 * (bits 1, 5 and 17 set)
1233 	 */
1234 	req->async_event_bitmap[0] =
1235 			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1236 				    BIT(ASYNC_EVENT_CODE_GRP_5) |
1237 				    BIT(ASYNC_EVENT_CODE_QNQ) |
1238 				    BIT(ASYNC_EVENT_CODE_SLIPORT));
1239 
1240 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1241 
1242 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1243 
1244 	status = be_mbox_notify_wait(adapter);
1245 	if (!status) {
1246 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1247 
1248 		mccq->id = le16_to_cpu(resp->id);
1249 		mccq->created = true;
1250 	}
1251 	mutex_unlock(&adapter->mbox_lock);
1252 
1253 	return status;
1254 }
1255 
1256 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1257 				  struct be_queue_info *mccq,
1258 				  struct be_queue_info *cq)
1259 {
1260 	struct be_mcc_wrb *wrb;
1261 	struct be_cmd_req_mcc_create *req;
1262 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1263 	void *ctxt;
1264 	int status;
1265 
1266 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1267 		return -1;
1268 
1269 	wrb = wrb_from_mbox(adapter);
1270 	req = embedded_payload(wrb);
1271 	ctxt = &req->context;
1272 
1273 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1274 			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1275 			       NULL);
1276 
1277 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1278 
1279 	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1280 	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1281 		      be_encoded_q_len(mccq->len));
1282 	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1283 
1284 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1285 
1286 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1287 
1288 	status = be_mbox_notify_wait(adapter);
1289 	if (!status) {
1290 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1291 
1292 		mccq->id = le16_to_cpu(resp->id);
1293 		mccq->created = true;
1294 	}
1295 
1296 	mutex_unlock(&adapter->mbox_lock);
1297 	return status;
1298 }
1299 
1300 int be_cmd_mccq_create(struct be_adapter *adapter,
1301 		       struct be_queue_info *mccq, struct be_queue_info *cq)
1302 {
1303 	int status;
1304 
1305 	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1306 	if (status && BEx_chip(adapter)) {
1307 		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1308 			"or newer to avoid conflicting priorities between NIC "
1309 			"and FCoE traffic");
1310 		status = be_cmd_mccq_org_create(adapter, mccq, cq);
1311 	}
1312 	return status;
1313 }
1314 
1315 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1316 {
1317 	struct be_mcc_wrb wrb = {0};
1318 	struct be_cmd_req_eth_tx_create *req;
1319 	struct be_queue_info *txq = &txo->q;
1320 	struct be_queue_info *cq = &txo->cq;
1321 	struct be_dma_mem *q_mem = &txq->dma_mem;
1322 	int status, ver = 0;
1323 
1324 	req = embedded_payload(&wrb);
1325 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1326 			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1327 
1328 	if (lancer_chip(adapter)) {
1329 		req->hdr.version = 1;
1330 	} else if (BEx_chip(adapter)) {
1331 		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1332 			req->hdr.version = 2;
1333 	} else { /* For SH */
1334 		req->hdr.version = 2;
1335 	}
1336 
1337 	if (req->hdr.version > 0)
1338 		req->if_id = cpu_to_le16(adapter->if_handle);
1339 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1340 	req->ulp_num = BE_ULP1_NUM;
1341 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1342 	req->cq_id = cpu_to_le16(cq->id);
1343 	req->queue_size = be_encoded_q_len(txq->len);
1344 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1345 	ver = req->hdr.version;
1346 
1347 	status = be_cmd_notify_wait(adapter, &wrb);
1348 	if (!status) {
1349 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1350 
1351 		txq->id = le16_to_cpu(resp->cid);
1352 		if (ver == 2)
1353 			txo->db_offset = le32_to_cpu(resp->db_offset);
1354 		else
1355 			txo->db_offset = DB_TXULP1_OFFSET;
1356 		txq->created = true;
1357 	}
1358 
1359 	return status;
1360 }
1361 
1362 /* Uses MCC */
1363 int be_cmd_rxq_create(struct be_adapter *adapter,
1364 		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1365 		      u32 if_id, u32 rss, u8 *rss_id)
1366 {
1367 	struct be_mcc_wrb *wrb;
1368 	struct be_cmd_req_eth_rx_create *req;
1369 	struct be_dma_mem *q_mem = &rxq->dma_mem;
1370 	int status;
1371 
1372 	spin_lock_bh(&adapter->mcc_lock);
1373 
1374 	wrb = wrb_from_mccq(adapter);
1375 	if (!wrb) {
1376 		status = -EBUSY;
1377 		goto err;
1378 	}
1379 	req = embedded_payload(wrb);
1380 
1381 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1382 			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1383 
1384 	req->cq_id = cpu_to_le16(cq_id);
1385 	req->frag_size = fls(frag_size) - 1;
1386 	req->num_pages = 2;
1387 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1388 	req->interface_id = cpu_to_le32(if_id);
1389 	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1390 	req->rss_queue = cpu_to_le32(rss);
1391 
1392 	status = be_mcc_notify_wait(adapter);
1393 	if (!status) {
1394 		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1395 
1396 		rxq->id = le16_to_cpu(resp->id);
1397 		rxq->created = true;
1398 		*rss_id = resp->rss_id;
1399 	}
1400 
1401 err:
1402 	spin_unlock_bh(&adapter->mcc_lock);
1403 	return status;
1404 }
1405 
1406 /* Generic destroyer function for all types of queues
1407  * Uses Mbox
1408  */
1409 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1410 		     int queue_type)
1411 {
1412 	struct be_mcc_wrb *wrb;
1413 	struct be_cmd_req_q_destroy *req;
1414 	u8 subsys = 0, opcode = 0;
1415 	int status;
1416 
1417 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1418 		return -1;
1419 
1420 	wrb = wrb_from_mbox(adapter);
1421 	req = embedded_payload(wrb);
1422 
1423 	switch (queue_type) {
1424 	case QTYPE_EQ:
1425 		subsys = CMD_SUBSYSTEM_COMMON;
1426 		opcode = OPCODE_COMMON_EQ_DESTROY;
1427 		break;
1428 	case QTYPE_CQ:
1429 		subsys = CMD_SUBSYSTEM_COMMON;
1430 		opcode = OPCODE_COMMON_CQ_DESTROY;
1431 		break;
1432 	case QTYPE_TXQ:
1433 		subsys = CMD_SUBSYSTEM_ETH;
1434 		opcode = OPCODE_ETH_TX_DESTROY;
1435 		break;
1436 	case QTYPE_RXQ:
1437 		subsys = CMD_SUBSYSTEM_ETH;
1438 		opcode = OPCODE_ETH_RX_DESTROY;
1439 		break;
1440 	case QTYPE_MCCQ:
1441 		subsys = CMD_SUBSYSTEM_COMMON;
1442 		opcode = OPCODE_COMMON_MCC_DESTROY;
1443 		break;
1444 	default:
1445 		BUG();
1446 	}
1447 
1448 	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1449 			       NULL);
1450 	req->id = cpu_to_le16(q->id);
1451 
1452 	status = be_mbox_notify_wait(adapter);
1453 	q->created = false;
1454 
1455 	mutex_unlock(&adapter->mbox_lock);
1456 	return status;
1457 }
1458 
1459 /* Uses MCC */
1460 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1461 {
1462 	struct be_mcc_wrb *wrb;
1463 	struct be_cmd_req_q_destroy *req;
1464 	int status;
1465 
1466 	spin_lock_bh(&adapter->mcc_lock);
1467 
1468 	wrb = wrb_from_mccq(adapter);
1469 	if (!wrb) {
1470 		status = -EBUSY;
1471 		goto err;
1472 	}
1473 	req = embedded_payload(wrb);
1474 
1475 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1476 			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1477 	req->id = cpu_to_le16(q->id);
1478 
1479 	status = be_mcc_notify_wait(adapter);
1480 	q->created = false;
1481 
1482 err:
1483 	spin_unlock_bh(&adapter->mcc_lock);
1484 	return status;
1485 }
1486 
1487 /* Create an rx filtering policy configuration on an i/f
1488  * Will use MBOX only if MCCQ has not been created.
1489  */
1490 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1491 		     u32 *if_handle, u32 domain)
1492 {
1493 	struct be_mcc_wrb wrb = {0};
1494 	struct be_cmd_req_if_create *req;
1495 	int status;
1496 
1497 	req = embedded_payload(&wrb);
1498 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1499 			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1500 			       sizeof(*req), &wrb, NULL);
1501 	req->hdr.domain = domain;
1502 	req->capability_flags = cpu_to_le32(cap_flags);
1503 	req->enable_flags = cpu_to_le32(en_flags);
1504 	req->pmac_invalid = true;
1505 
1506 	status = be_cmd_notify_wait(adapter, &wrb);
1507 	if (!status) {
1508 		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1509 
1510 		*if_handle = le32_to_cpu(resp->interface_id);
1511 
1512 		/* Hack to retrieve VF's pmac-id on BE3 */
1513 		if (BE3_chip(adapter) && !be_physfn(adapter))
1514 			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1515 	}
1516 	return status;
1517 }
1518 
1519 /* Uses MCCQ */
1520 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1521 {
1522 	struct be_mcc_wrb *wrb;
1523 	struct be_cmd_req_if_destroy *req;
1524 	int status;
1525 
1526 	if (interface_id == -1)
1527 		return 0;
1528 
1529 	spin_lock_bh(&adapter->mcc_lock);
1530 
1531 	wrb = wrb_from_mccq(adapter);
1532 	if (!wrb) {
1533 		status = -EBUSY;
1534 		goto err;
1535 	}
1536 	req = embedded_payload(wrb);
1537 
1538 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1539 			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1540 			       sizeof(*req), wrb, NULL);
1541 	req->hdr.domain = domain;
1542 	req->interface_id = cpu_to_le32(interface_id);
1543 
1544 	status = be_mcc_notify_wait(adapter);
1545 err:
1546 	spin_unlock_bh(&adapter->mcc_lock);
1547 	return status;
1548 }
1549 
1550 /* Get stats is a non embedded command: the request is not embedded inside
1551  * WRB but is a separate dma memory block
1552  * Uses asynchronous MCC
1553  */
1554 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1555 {
1556 	struct be_mcc_wrb *wrb;
1557 	struct be_cmd_req_hdr *hdr;
1558 	int status = 0;
1559 
1560 	spin_lock_bh(&adapter->mcc_lock);
1561 
1562 	wrb = wrb_from_mccq(adapter);
1563 	if (!wrb) {
1564 		status = -EBUSY;
1565 		goto err;
1566 	}
1567 	hdr = nonemb_cmd->va;
1568 
1569 	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1570 			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1571 			       nonemb_cmd);
1572 
1573 	/* version 1 of the cmd is not supported only by BE2 */
1574 	if (BE2_chip(adapter))
1575 		hdr->version = 0;
1576 	if (BE3_chip(adapter) || lancer_chip(adapter))
1577 		hdr->version = 1;
1578 	else
1579 		hdr->version = 2;
1580 
1581 	be_mcc_notify(adapter);
1582 	adapter->stats_cmd_sent = true;
1583 
1584 err:
1585 	spin_unlock_bh(&adapter->mcc_lock);
1586 	return status;
1587 }
1588 
1589 /* Lancer Stats */
1590 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1591 			       struct be_dma_mem *nonemb_cmd)
1592 {
1593 	struct be_mcc_wrb *wrb;
1594 	struct lancer_cmd_req_pport_stats *req;
1595 	int status = 0;
1596 
1597 	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1598 			    CMD_SUBSYSTEM_ETH))
1599 		return -EPERM;
1600 
1601 	spin_lock_bh(&adapter->mcc_lock);
1602 
1603 	wrb = wrb_from_mccq(adapter);
1604 	if (!wrb) {
1605 		status = -EBUSY;
1606 		goto err;
1607 	}
1608 	req = nonemb_cmd->va;
1609 
1610 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1611 			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1612 			       wrb, nonemb_cmd);
1613 
1614 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1615 	req->cmd_params.params.reset_stats = 0;
1616 
1617 	be_mcc_notify(adapter);
1618 	adapter->stats_cmd_sent = true;
1619 
1620 err:
1621 	spin_unlock_bh(&adapter->mcc_lock);
1622 	return status;
1623 }
1624 
1625 static int be_mac_to_link_speed(int mac_speed)
1626 {
1627 	switch (mac_speed) {
1628 	case PHY_LINK_SPEED_ZERO:
1629 		return 0;
1630 	case PHY_LINK_SPEED_10MBPS:
1631 		return 10;
1632 	case PHY_LINK_SPEED_100MBPS:
1633 		return 100;
1634 	case PHY_LINK_SPEED_1GBPS:
1635 		return 1000;
1636 	case PHY_LINK_SPEED_10GBPS:
1637 		return 10000;
1638 	case PHY_LINK_SPEED_20GBPS:
1639 		return 20000;
1640 	case PHY_LINK_SPEED_25GBPS:
1641 		return 25000;
1642 	case PHY_LINK_SPEED_40GBPS:
1643 		return 40000;
1644 	}
1645 	return 0;
1646 }
1647 
1648 /* Uses synchronous mcc
1649  * Returns link_speed in Mbps
1650  */
1651 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1652 			     u8 *link_status, u32 dom)
1653 {
1654 	struct be_mcc_wrb *wrb;
1655 	struct be_cmd_req_link_status *req;
1656 	int status;
1657 
1658 	spin_lock_bh(&adapter->mcc_lock);
1659 
1660 	if (link_status)
1661 		*link_status = LINK_DOWN;
1662 
1663 	wrb = wrb_from_mccq(adapter);
1664 	if (!wrb) {
1665 		status = -EBUSY;
1666 		goto err;
1667 	}
1668 	req = embedded_payload(wrb);
1669 
1670 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1671 			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1672 			       sizeof(*req), wrb, NULL);
1673 
1674 	/* version 1 of the cmd is not supported only by BE2 */
1675 	if (!BE2_chip(adapter))
1676 		req->hdr.version = 1;
1677 
1678 	req->hdr.domain = dom;
1679 
1680 	status = be_mcc_notify_wait(adapter);
1681 	if (!status) {
1682 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1683 
1684 		if (link_speed) {
1685 			*link_speed = resp->link_speed ?
1686 				      le16_to_cpu(resp->link_speed) * 10 :
1687 				      be_mac_to_link_speed(resp->mac_speed);
1688 
1689 			if (!resp->logical_link_status)
1690 				*link_speed = 0;
1691 		}
1692 		if (link_status)
1693 			*link_status = resp->logical_link_status;
1694 	}
1695 
1696 err:
1697 	spin_unlock_bh(&adapter->mcc_lock);
1698 	return status;
1699 }
1700 
1701 /* Uses synchronous mcc */
1702 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1703 {
1704 	struct be_mcc_wrb *wrb;
1705 	struct be_cmd_req_get_cntl_addnl_attribs *req;
1706 	int status = 0;
1707 
1708 	spin_lock_bh(&adapter->mcc_lock);
1709 
1710 	wrb = wrb_from_mccq(adapter);
1711 	if (!wrb) {
1712 		status = -EBUSY;
1713 		goto err;
1714 	}
1715 	req = embedded_payload(wrb);
1716 
1717 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1718 			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1719 			       sizeof(*req), wrb, NULL);
1720 
1721 	be_mcc_notify(adapter);
1722 
1723 err:
1724 	spin_unlock_bh(&adapter->mcc_lock);
1725 	return status;
1726 }
1727 
1728 /* Uses synchronous mcc */
1729 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1730 {
1731 	struct be_mcc_wrb *wrb;
1732 	struct be_cmd_req_get_fat *req;
1733 	int status;
1734 
1735 	spin_lock_bh(&adapter->mcc_lock);
1736 
1737 	wrb = wrb_from_mccq(adapter);
1738 	if (!wrb) {
1739 		status = -EBUSY;
1740 		goto err;
1741 	}
1742 	req = embedded_payload(wrb);
1743 
1744 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1745 			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
1746 			       NULL);
1747 	req->fat_operation = cpu_to_le32(QUERY_FAT);
1748 	status = be_mcc_notify_wait(adapter);
1749 	if (!status) {
1750 		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1751 
1752 		if (log_size && resp->log_size)
1753 			*log_size = le32_to_cpu(resp->log_size) -
1754 					sizeof(u32);
1755 	}
1756 err:
1757 	spin_unlock_bh(&adapter->mcc_lock);
1758 	return status;
1759 }
1760 
1761 int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1762 {
1763 	struct be_dma_mem get_fat_cmd;
1764 	struct be_mcc_wrb *wrb;
1765 	struct be_cmd_req_get_fat *req;
1766 	u32 offset = 0, total_size, buf_size,
1767 				log_offset = sizeof(u32), payload_len;
1768 	int status = 0;
1769 
1770 	if (buf_len == 0)
1771 		return -EIO;
1772 
1773 	total_size = buf_len;
1774 
1775 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1776 	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1777 					      get_fat_cmd.size,
1778 					      &get_fat_cmd.dma);
1779 	if (!get_fat_cmd.va) {
1780 		dev_err(&adapter->pdev->dev,
1781 			"Memory allocation failure while reading FAT data\n");
1782 		return -ENOMEM;
1783 	}
1784 
1785 	spin_lock_bh(&adapter->mcc_lock);
1786 
1787 	while (total_size) {
1788 		buf_size = min(total_size, (u32)60*1024);
1789 		total_size -= buf_size;
1790 
1791 		wrb = wrb_from_mccq(adapter);
1792 		if (!wrb) {
1793 			status = -EBUSY;
1794 			goto err;
1795 		}
1796 		req = get_fat_cmd.va;
1797 
1798 		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1799 		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1800 				       OPCODE_COMMON_MANAGE_FAT, payload_len,
1801 				       wrb, &get_fat_cmd);
1802 
1803 		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1804 		req->read_log_offset = cpu_to_le32(log_offset);
1805 		req->read_log_length = cpu_to_le32(buf_size);
1806 		req->data_buffer_size = cpu_to_le32(buf_size);
1807 
1808 		status = be_mcc_notify_wait(adapter);
1809 		if (!status) {
1810 			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1811 
1812 			memcpy(buf + offset,
1813 			       resp->data_buffer,
1814 			       le32_to_cpu(resp->read_log_length));
1815 		} else {
1816 			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1817 			goto err;
1818 		}
1819 		offset += buf_size;
1820 		log_offset += buf_size;
1821 	}
1822 err:
1823 	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1824 			    get_fat_cmd.va, get_fat_cmd.dma);
1825 	spin_unlock_bh(&adapter->mcc_lock);
1826 	return status;
1827 }
1828 
1829 /* Uses synchronous mcc */
1830 int be_cmd_get_fw_ver(struct be_adapter *adapter)
1831 {
1832 	struct be_mcc_wrb *wrb;
1833 	struct be_cmd_req_get_fw_version *req;
1834 	int status;
1835 
1836 	spin_lock_bh(&adapter->mcc_lock);
1837 
1838 	wrb = wrb_from_mccq(adapter);
1839 	if (!wrb) {
1840 		status = -EBUSY;
1841 		goto err;
1842 	}
1843 
1844 	req = embedded_payload(wrb);
1845 
1846 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1847 			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1848 			       NULL);
1849 	status = be_mcc_notify_wait(adapter);
1850 	if (!status) {
1851 		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1852 
1853 		strlcpy(adapter->fw_ver, resp->firmware_version_string,
1854 			sizeof(adapter->fw_ver));
1855 		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1856 			sizeof(adapter->fw_on_flash));
1857 	}
1858 err:
1859 	spin_unlock_bh(&adapter->mcc_lock);
1860 	return status;
1861 }
1862 
1863 /* set the EQ delay interval of an EQ to specified value
1864  * Uses async mcc
1865  */
1866 static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1867 			       struct be_set_eqd *set_eqd, int num)
1868 {
1869 	struct be_mcc_wrb *wrb;
1870 	struct be_cmd_req_modify_eq_delay *req;
1871 	int status = 0, i;
1872 
1873 	spin_lock_bh(&adapter->mcc_lock);
1874 
1875 	wrb = wrb_from_mccq(adapter);
1876 	if (!wrb) {
1877 		status = -EBUSY;
1878 		goto err;
1879 	}
1880 	req = embedded_payload(wrb);
1881 
1882 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1883 			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1884 			       NULL);
1885 
1886 	req->num_eq = cpu_to_le32(num);
1887 	for (i = 0; i < num; i++) {
1888 		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1889 		req->set_eqd[i].phase = 0;
1890 		req->set_eqd[i].delay_multiplier =
1891 				cpu_to_le32(set_eqd[i].delay_multiplier);
1892 	}
1893 
1894 	be_mcc_notify(adapter);
1895 err:
1896 	spin_unlock_bh(&adapter->mcc_lock);
1897 	return status;
1898 }
1899 
1900 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1901 		      int num)
1902 {
1903 	int num_eqs, i = 0;
1904 
1905 	if (lancer_chip(adapter) && num > 8) {
1906 		while (num) {
1907 			num_eqs = min(num, 8);
1908 			__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1909 			i += num_eqs;
1910 			num -= num_eqs;
1911 		}
1912 	} else {
1913 		__be_cmd_modify_eqd(adapter, set_eqd, num);
1914 	}
1915 
1916 	return 0;
1917 }
1918 
1919 /* Uses sycnhronous mcc */
1920 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1921 		       u32 num)
1922 {
1923 	struct be_mcc_wrb *wrb;
1924 	struct be_cmd_req_vlan_config *req;
1925 	int status;
1926 
1927 	spin_lock_bh(&adapter->mcc_lock);
1928 
1929 	wrb = wrb_from_mccq(adapter);
1930 	if (!wrb) {
1931 		status = -EBUSY;
1932 		goto err;
1933 	}
1934 	req = embedded_payload(wrb);
1935 
1936 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1937 			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1938 			       wrb, NULL);
1939 
1940 	req->interface_id = if_id;
1941 	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1942 	req->num_vlan = num;
1943 	memcpy(req->normal_vlan, vtag_array,
1944 	       req->num_vlan * sizeof(vtag_array[0]));
1945 
1946 	status = be_mcc_notify_wait(adapter);
1947 err:
1948 	spin_unlock_bh(&adapter->mcc_lock);
1949 	return status;
1950 }
1951 
1952 static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1953 {
1954 	struct be_mcc_wrb *wrb;
1955 	struct be_dma_mem *mem = &adapter->rx_filter;
1956 	struct be_cmd_req_rx_filter *req = mem->va;
1957 	int status;
1958 
1959 	spin_lock_bh(&adapter->mcc_lock);
1960 
1961 	wrb = wrb_from_mccq(adapter);
1962 	if (!wrb) {
1963 		status = -EBUSY;
1964 		goto err;
1965 	}
1966 	memset(req, 0, sizeof(*req));
1967 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1968 			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1969 			       wrb, mem);
1970 
1971 	req->if_id = cpu_to_le32(adapter->if_handle);
1972 	req->if_flags_mask = cpu_to_le32(flags);
1973 	req->if_flags = (value == ON) ? req->if_flags_mask : 0;
1974 
1975 	if (flags & BE_IF_FLAGS_MULTICAST) {
1976 		struct netdev_hw_addr *ha;
1977 		int i = 0;
1978 
1979 		/* Reset mcast promisc mode if already set by setting mask
1980 		 * and not setting flags field
1981 		 */
1982 		req->if_flags_mask |=
1983 			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1984 				    be_if_cap_flags(adapter));
1985 		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1986 		netdev_for_each_mc_addr(ha, adapter->netdev)
1987 			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1988 	}
1989 
1990 	status = be_mcc_notify_wait(adapter);
1991 err:
1992 	spin_unlock_bh(&adapter->mcc_lock);
1993 	return status;
1994 }
1995 
1996 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1997 {
1998 	struct device *dev = &adapter->pdev->dev;
1999 
2000 	if ((flags & be_if_cap_flags(adapter)) != flags) {
2001 		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
2002 		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
2003 			 be_if_cap_flags(adapter));
2004 	}
2005 	flags &= be_if_cap_flags(adapter);
2006 
2007 	return __be_cmd_rx_filter(adapter, flags, value);
2008 }
2009 
2010 /* Uses synchrounous mcc */
2011 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
2012 {
2013 	struct be_mcc_wrb *wrb;
2014 	struct be_cmd_req_set_flow_control *req;
2015 	int status;
2016 
2017 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
2018 			    CMD_SUBSYSTEM_COMMON))
2019 		return -EPERM;
2020 
2021 	spin_lock_bh(&adapter->mcc_lock);
2022 
2023 	wrb = wrb_from_mccq(adapter);
2024 	if (!wrb) {
2025 		status = -EBUSY;
2026 		goto err;
2027 	}
2028 	req = embedded_payload(wrb);
2029 
2030 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2031 			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
2032 			       wrb, NULL);
2033 
2034 	req->hdr.version = 1;
2035 	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
2036 	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
2037 
2038 	status = be_mcc_notify_wait(adapter);
2039 
2040 err:
2041 	spin_unlock_bh(&adapter->mcc_lock);
2042 
2043 	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
2044 		return  -EOPNOTSUPP;
2045 
2046 	return status;
2047 }
2048 
2049 /* Uses sycn mcc */
2050 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
2051 {
2052 	struct be_mcc_wrb *wrb;
2053 	struct be_cmd_req_get_flow_control *req;
2054 	int status;
2055 
2056 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2057 			    CMD_SUBSYSTEM_COMMON))
2058 		return -EPERM;
2059 
2060 	spin_lock_bh(&adapter->mcc_lock);
2061 
2062 	wrb = wrb_from_mccq(adapter);
2063 	if (!wrb) {
2064 		status = -EBUSY;
2065 		goto err;
2066 	}
2067 	req = embedded_payload(wrb);
2068 
2069 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2070 			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2071 			       wrb, NULL);
2072 
2073 	status = be_mcc_notify_wait(adapter);
2074 	if (!status) {
2075 		struct be_cmd_resp_get_flow_control *resp =
2076 						embedded_payload(wrb);
2077 
2078 		*tx_fc = le16_to_cpu(resp->tx_flow_control);
2079 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
2080 	}
2081 
2082 err:
2083 	spin_unlock_bh(&adapter->mcc_lock);
2084 	return status;
2085 }
2086 
2087 /* Uses mbox */
2088 int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2089 {
2090 	struct be_mcc_wrb *wrb;
2091 	struct be_cmd_req_query_fw_cfg *req;
2092 	int status;
2093 
2094 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2095 		return -1;
2096 
2097 	wrb = wrb_from_mbox(adapter);
2098 	req = embedded_payload(wrb);
2099 
2100 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2101 			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2102 			       sizeof(*req), wrb, NULL);
2103 
2104 	status = be_mbox_notify_wait(adapter);
2105 	if (!status) {
2106 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2107 
2108 		adapter->port_num = le32_to_cpu(resp->phys_port);
2109 		adapter->function_mode = le32_to_cpu(resp->function_mode);
2110 		adapter->function_caps = le32_to_cpu(resp->function_caps);
2111 		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2112 		dev_info(&adapter->pdev->dev,
2113 			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2114 			 adapter->function_mode, adapter->function_caps);
2115 	}
2116 
2117 	mutex_unlock(&adapter->mbox_lock);
2118 	return status;
2119 }
2120 
2121 /* Uses mbox */
2122 int be_cmd_reset_function(struct be_adapter *adapter)
2123 {
2124 	struct be_mcc_wrb *wrb;
2125 	struct be_cmd_req_hdr *req;
2126 	int status;
2127 
2128 	if (lancer_chip(adapter)) {
2129 		status = lancer_wait_ready(adapter);
2130 		if (!status) {
2131 			iowrite32(SLI_PORT_CONTROL_IP_MASK,
2132 				  adapter->db + SLIPORT_CONTROL_OFFSET);
2133 			status = lancer_test_and_set_rdy_state(adapter);
2134 		}
2135 		if (status) {
2136 			dev_err(&adapter->pdev->dev,
2137 				"Adapter in non recoverable error\n");
2138 		}
2139 		return status;
2140 	}
2141 
2142 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2143 		return -1;
2144 
2145 	wrb = wrb_from_mbox(adapter);
2146 	req = embedded_payload(wrb);
2147 
2148 	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2149 			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2150 			       NULL);
2151 
2152 	status = be_mbox_notify_wait(adapter);
2153 
2154 	mutex_unlock(&adapter->mbox_lock);
2155 	return status;
2156 }
2157 
2158 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2159 		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2160 {
2161 	struct be_mcc_wrb *wrb;
2162 	struct be_cmd_req_rss_config *req;
2163 	int status;
2164 
2165 	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2166 		return 0;
2167 
2168 	spin_lock_bh(&adapter->mcc_lock);
2169 
2170 	wrb = wrb_from_mccq(adapter);
2171 	if (!wrb) {
2172 		status = -EBUSY;
2173 		goto err;
2174 	}
2175 	req = embedded_payload(wrb);
2176 
2177 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2178 			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2179 
2180 	req->if_id = cpu_to_le32(adapter->if_handle);
2181 	req->enable_rss = cpu_to_le16(rss_hash_opts);
2182 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2183 
2184 	if (!BEx_chip(adapter))
2185 		req->hdr.version = 1;
2186 
2187 	memcpy(req->cpu_table, rsstable, table_size);
2188 	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2189 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2190 
2191 	status = be_mcc_notify_wait(adapter);
2192 err:
2193 	spin_unlock_bh(&adapter->mcc_lock);
2194 	return status;
2195 }
2196 
2197 /* Uses sync mcc */
2198 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2199 			    u8 bcn, u8 sts, u8 state)
2200 {
2201 	struct be_mcc_wrb *wrb;
2202 	struct be_cmd_req_enable_disable_beacon *req;
2203 	int status;
2204 
2205 	spin_lock_bh(&adapter->mcc_lock);
2206 
2207 	wrb = wrb_from_mccq(adapter);
2208 	if (!wrb) {
2209 		status = -EBUSY;
2210 		goto err;
2211 	}
2212 	req = embedded_payload(wrb);
2213 
2214 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2215 			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2216 			       sizeof(*req), wrb, NULL);
2217 
2218 	req->port_num = port_num;
2219 	req->beacon_state = state;
2220 	req->beacon_duration = bcn;
2221 	req->status_duration = sts;
2222 
2223 	status = be_mcc_notify_wait(adapter);
2224 
2225 err:
2226 	spin_unlock_bh(&adapter->mcc_lock);
2227 	return status;
2228 }
2229 
2230 /* Uses sync mcc */
2231 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2232 {
2233 	struct be_mcc_wrb *wrb;
2234 	struct be_cmd_req_get_beacon_state *req;
2235 	int status;
2236 
2237 	spin_lock_bh(&adapter->mcc_lock);
2238 
2239 	wrb = wrb_from_mccq(adapter);
2240 	if (!wrb) {
2241 		status = -EBUSY;
2242 		goto err;
2243 	}
2244 	req = embedded_payload(wrb);
2245 
2246 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2247 			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2248 			       wrb, NULL);
2249 
2250 	req->port_num = port_num;
2251 
2252 	status = be_mcc_notify_wait(adapter);
2253 	if (!status) {
2254 		struct be_cmd_resp_get_beacon_state *resp =
2255 						embedded_payload(wrb);
2256 
2257 		*state = resp->beacon_state;
2258 	}
2259 
2260 err:
2261 	spin_unlock_bh(&adapter->mcc_lock);
2262 	return status;
2263 }
2264 
2265 /* Uses sync mcc */
2266 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2267 				      u8 page_num, u8 *data)
2268 {
2269 	struct be_dma_mem cmd;
2270 	struct be_mcc_wrb *wrb;
2271 	struct be_cmd_req_port_type *req;
2272 	int status;
2273 
2274 	if (page_num > TR_PAGE_A2)
2275 		return -EINVAL;
2276 
2277 	cmd.size = sizeof(struct be_cmd_resp_port_type);
2278 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2279 	if (!cmd.va) {
2280 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2281 		return -ENOMEM;
2282 	}
2283 	memset(cmd.va, 0, cmd.size);
2284 
2285 	spin_lock_bh(&adapter->mcc_lock);
2286 
2287 	wrb = wrb_from_mccq(adapter);
2288 	if (!wrb) {
2289 		status = -EBUSY;
2290 		goto err;
2291 	}
2292 	req = cmd.va;
2293 
2294 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2295 			       OPCODE_COMMON_READ_TRANSRECV_DATA,
2296 			       cmd.size, wrb, &cmd);
2297 
2298 	req->port = cpu_to_le32(adapter->hba_port_num);
2299 	req->page_num = cpu_to_le32(page_num);
2300 	status = be_mcc_notify_wait(adapter);
2301 	if (!status) {
2302 		struct be_cmd_resp_port_type *resp = cmd.va;
2303 
2304 		memcpy(data, resp->page_data, PAGE_DATA_LEN);
2305 	}
2306 err:
2307 	spin_unlock_bh(&adapter->mcc_lock);
2308 	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2309 	return status;
2310 }
2311 
2312 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2313 			    u32 data_size, u32 data_offset,
2314 			    const char *obj_name, u32 *data_written,
2315 			    u8 *change_status, u8 *addn_status)
2316 {
2317 	struct be_mcc_wrb *wrb;
2318 	struct lancer_cmd_req_write_object *req;
2319 	struct lancer_cmd_resp_write_object *resp;
2320 	void *ctxt = NULL;
2321 	int status;
2322 
2323 	spin_lock_bh(&adapter->mcc_lock);
2324 	adapter->flash_status = 0;
2325 
2326 	wrb = wrb_from_mccq(adapter);
2327 	if (!wrb) {
2328 		status = -EBUSY;
2329 		goto err_unlock;
2330 	}
2331 
2332 	req = embedded_payload(wrb);
2333 
2334 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2335 			       OPCODE_COMMON_WRITE_OBJECT,
2336 			       sizeof(struct lancer_cmd_req_write_object), wrb,
2337 			       NULL);
2338 
2339 	ctxt = &req->context;
2340 	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2341 		      write_length, ctxt, data_size);
2342 
2343 	if (data_size == 0)
2344 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2345 			      eof, ctxt, 1);
2346 	else
2347 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2348 			      eof, ctxt, 0);
2349 
2350 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
2351 	req->write_offset = cpu_to_le32(data_offset);
2352 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2353 	req->descriptor_count = cpu_to_le32(1);
2354 	req->buf_len = cpu_to_le32(data_size);
2355 	req->addr_low = cpu_to_le32((cmd->dma +
2356 				     sizeof(struct lancer_cmd_req_write_object))
2357 				    & 0xFFFFFFFF);
2358 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2359 				sizeof(struct lancer_cmd_req_write_object)));
2360 
2361 	be_mcc_notify(adapter);
2362 	spin_unlock_bh(&adapter->mcc_lock);
2363 
2364 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2365 					 msecs_to_jiffies(60000)))
2366 		status = -ETIMEDOUT;
2367 	else
2368 		status = adapter->flash_status;
2369 
2370 	resp = embedded_payload(wrb);
2371 	if (!status) {
2372 		*data_written = le32_to_cpu(resp->actual_write_len);
2373 		*change_status = resp->change_status;
2374 	} else {
2375 		*addn_status = resp->additional_status;
2376 	}
2377 
2378 	return status;
2379 
2380 err_unlock:
2381 	spin_unlock_bh(&adapter->mcc_lock);
2382 	return status;
2383 }
2384 
2385 int be_cmd_query_cable_type(struct be_adapter *adapter)
2386 {
2387 	u8 page_data[PAGE_DATA_LEN];
2388 	int status;
2389 
2390 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2391 						   page_data);
2392 	if (!status) {
2393 		switch (adapter->phy.interface_type) {
2394 		case PHY_TYPE_QSFP:
2395 			adapter->phy.cable_type =
2396 				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2397 			break;
2398 		case PHY_TYPE_SFP_PLUS_10GB:
2399 			adapter->phy.cable_type =
2400 				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2401 			break;
2402 		default:
2403 			adapter->phy.cable_type = 0;
2404 			break;
2405 		}
2406 	}
2407 	return status;
2408 }
2409 
2410 int be_cmd_query_sfp_info(struct be_adapter *adapter)
2411 {
2412 	u8 page_data[PAGE_DATA_LEN];
2413 	int status;
2414 
2415 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2416 						   page_data);
2417 	if (!status) {
2418 		strlcpy(adapter->phy.vendor_name, page_data +
2419 			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2420 		strlcpy(adapter->phy.vendor_pn,
2421 			page_data + SFP_VENDOR_PN_OFFSET,
2422 			SFP_VENDOR_NAME_LEN - 1);
2423 	}
2424 
2425 	return status;
2426 }
2427 
2428 int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
2429 {
2430 	struct lancer_cmd_req_delete_object *req;
2431 	struct be_mcc_wrb *wrb;
2432 	int status;
2433 
2434 	spin_lock_bh(&adapter->mcc_lock);
2435 
2436 	wrb = wrb_from_mccq(adapter);
2437 	if (!wrb) {
2438 		status = -EBUSY;
2439 		goto err;
2440 	}
2441 
2442 	req = embedded_payload(wrb);
2443 
2444 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2445 			       OPCODE_COMMON_DELETE_OBJECT,
2446 			       sizeof(*req), wrb, NULL);
2447 
2448 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2449 
2450 	status = be_mcc_notify_wait(adapter);
2451 err:
2452 	spin_unlock_bh(&adapter->mcc_lock);
2453 	return status;
2454 }
2455 
2456 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2457 			   u32 data_size, u32 data_offset, const char *obj_name,
2458 			   u32 *data_read, u32 *eof, u8 *addn_status)
2459 {
2460 	struct be_mcc_wrb *wrb;
2461 	struct lancer_cmd_req_read_object *req;
2462 	struct lancer_cmd_resp_read_object *resp;
2463 	int status;
2464 
2465 	spin_lock_bh(&adapter->mcc_lock);
2466 
2467 	wrb = wrb_from_mccq(adapter);
2468 	if (!wrb) {
2469 		status = -EBUSY;
2470 		goto err_unlock;
2471 	}
2472 
2473 	req = embedded_payload(wrb);
2474 
2475 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2476 			       OPCODE_COMMON_READ_OBJECT,
2477 			       sizeof(struct lancer_cmd_req_read_object), wrb,
2478 			       NULL);
2479 
2480 	req->desired_read_len = cpu_to_le32(data_size);
2481 	req->read_offset = cpu_to_le32(data_offset);
2482 	strcpy(req->object_name, obj_name);
2483 	req->descriptor_count = cpu_to_le32(1);
2484 	req->buf_len = cpu_to_le32(data_size);
2485 	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2486 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2487 
2488 	status = be_mcc_notify_wait(adapter);
2489 
2490 	resp = embedded_payload(wrb);
2491 	if (!status) {
2492 		*data_read = le32_to_cpu(resp->actual_read_len);
2493 		*eof = le32_to_cpu(resp->eof);
2494 	} else {
2495 		*addn_status = resp->additional_status;
2496 	}
2497 
2498 err_unlock:
2499 	spin_unlock_bh(&adapter->mcc_lock);
2500 	return status;
2501 }
2502 
2503 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2504 			  u32 flash_type, u32 flash_opcode, u32 img_offset,
2505 			  u32 buf_size)
2506 {
2507 	struct be_mcc_wrb *wrb;
2508 	struct be_cmd_write_flashrom *req;
2509 	int status;
2510 
2511 	spin_lock_bh(&adapter->mcc_lock);
2512 	adapter->flash_status = 0;
2513 
2514 	wrb = wrb_from_mccq(adapter);
2515 	if (!wrb) {
2516 		status = -EBUSY;
2517 		goto err_unlock;
2518 	}
2519 	req = cmd->va;
2520 
2521 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2522 			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2523 			       cmd);
2524 
2525 	req->params.op_type = cpu_to_le32(flash_type);
2526 	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2527 		req->params.offset = cpu_to_le32(img_offset);
2528 
2529 	req->params.op_code = cpu_to_le32(flash_opcode);
2530 	req->params.data_buf_size = cpu_to_le32(buf_size);
2531 
2532 	be_mcc_notify(adapter);
2533 	spin_unlock_bh(&adapter->mcc_lock);
2534 
2535 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2536 					 msecs_to_jiffies(40000)))
2537 		status = -ETIMEDOUT;
2538 	else
2539 		status = adapter->flash_status;
2540 
2541 	return status;
2542 
2543 err_unlock:
2544 	spin_unlock_bh(&adapter->mcc_lock);
2545 	return status;
2546 }
2547 
2548 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2549 			 u16 img_optype, u32 img_offset, u32 crc_offset)
2550 {
2551 	struct be_cmd_read_flash_crc *req;
2552 	struct be_mcc_wrb *wrb;
2553 	int status;
2554 
2555 	spin_lock_bh(&adapter->mcc_lock);
2556 
2557 	wrb = wrb_from_mccq(adapter);
2558 	if (!wrb) {
2559 		status = -EBUSY;
2560 		goto err;
2561 	}
2562 	req = embedded_payload(wrb);
2563 
2564 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2565 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2566 			       wrb, NULL);
2567 
2568 	req->params.op_type = cpu_to_le32(img_optype);
2569 	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2570 		req->params.offset = cpu_to_le32(img_offset + crc_offset);
2571 	else
2572 		req->params.offset = cpu_to_le32(crc_offset);
2573 
2574 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2575 	req->params.data_buf_size = cpu_to_le32(0x4);
2576 
2577 	status = be_mcc_notify_wait(adapter);
2578 	if (!status)
2579 		memcpy(flashed_crc, req->crc, 4);
2580 
2581 err:
2582 	spin_unlock_bh(&adapter->mcc_lock);
2583 	return status;
2584 }
2585 
2586 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2587 			    struct be_dma_mem *nonemb_cmd)
2588 {
2589 	struct be_mcc_wrb *wrb;
2590 	struct be_cmd_req_acpi_wol_magic_config *req;
2591 	int status;
2592 
2593 	spin_lock_bh(&adapter->mcc_lock);
2594 
2595 	wrb = wrb_from_mccq(adapter);
2596 	if (!wrb) {
2597 		status = -EBUSY;
2598 		goto err;
2599 	}
2600 	req = nonemb_cmd->va;
2601 
2602 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2603 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
2604 			       wrb, nonemb_cmd);
2605 	memcpy(req->magic_mac, mac, ETH_ALEN);
2606 
2607 	status = be_mcc_notify_wait(adapter);
2608 
2609 err:
2610 	spin_unlock_bh(&adapter->mcc_lock);
2611 	return status;
2612 }
2613 
2614 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2615 			u8 loopback_type, u8 enable)
2616 {
2617 	struct be_mcc_wrb *wrb;
2618 	struct be_cmd_req_set_lmode *req;
2619 	int status;
2620 
2621 	spin_lock_bh(&adapter->mcc_lock);
2622 
2623 	wrb = wrb_from_mccq(adapter);
2624 	if (!wrb) {
2625 		status = -EBUSY;
2626 		goto err;
2627 	}
2628 
2629 	req = embedded_payload(wrb);
2630 
2631 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2632 			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
2633 			       wrb, NULL);
2634 
2635 	req->src_port = port_num;
2636 	req->dest_port = port_num;
2637 	req->loopback_type = loopback_type;
2638 	req->loopback_state = enable;
2639 
2640 	status = be_mcc_notify_wait(adapter);
2641 err:
2642 	spin_unlock_bh(&adapter->mcc_lock);
2643 	return status;
2644 }
2645 
2646 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2647 			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
2648 			 u64 pattern)
2649 {
2650 	struct be_mcc_wrb *wrb;
2651 	struct be_cmd_req_loopback_test *req;
2652 	struct be_cmd_resp_loopback_test *resp;
2653 	int status;
2654 
2655 	spin_lock_bh(&adapter->mcc_lock);
2656 
2657 	wrb = wrb_from_mccq(adapter);
2658 	if (!wrb) {
2659 		status = -EBUSY;
2660 		goto err;
2661 	}
2662 
2663 	req = embedded_payload(wrb);
2664 
2665 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2666 			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
2667 			       NULL);
2668 
2669 	req->hdr.timeout = cpu_to_le32(15);
2670 	req->pattern = cpu_to_le64(pattern);
2671 	req->src_port = cpu_to_le32(port_num);
2672 	req->dest_port = cpu_to_le32(port_num);
2673 	req->pkt_size = cpu_to_le32(pkt_size);
2674 	req->num_pkts = cpu_to_le32(num_pkts);
2675 	req->loopback_type = cpu_to_le32(loopback_type);
2676 
2677 	be_mcc_notify(adapter);
2678 
2679 	spin_unlock_bh(&adapter->mcc_lock);
2680 
2681 	wait_for_completion(&adapter->et_cmd_compl);
2682 	resp = embedded_payload(wrb);
2683 	status = le32_to_cpu(resp->status);
2684 
2685 	return status;
2686 err:
2687 	spin_unlock_bh(&adapter->mcc_lock);
2688 	return status;
2689 }
2690 
2691 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2692 			u32 byte_cnt, struct be_dma_mem *cmd)
2693 {
2694 	struct be_mcc_wrb *wrb;
2695 	struct be_cmd_req_ddrdma_test *req;
2696 	int status;
2697 	int i, j = 0;
2698 
2699 	spin_lock_bh(&adapter->mcc_lock);
2700 
2701 	wrb = wrb_from_mccq(adapter);
2702 	if (!wrb) {
2703 		status = -EBUSY;
2704 		goto err;
2705 	}
2706 	req = cmd->va;
2707 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2708 			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
2709 			       cmd);
2710 
2711 	req->pattern = cpu_to_le64(pattern);
2712 	req->byte_count = cpu_to_le32(byte_cnt);
2713 	for (i = 0; i < byte_cnt; i++) {
2714 		req->snd_buff[i] = (u8)(pattern >> (j*8));
2715 		j++;
2716 		if (j > 7)
2717 			j = 0;
2718 	}
2719 
2720 	status = be_mcc_notify_wait(adapter);
2721 
2722 	if (!status) {
2723 		struct be_cmd_resp_ddrdma_test *resp;
2724 
2725 		resp = cmd->va;
2726 		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2727 		    resp->snd_err) {
2728 			status = -1;
2729 		}
2730 	}
2731 
2732 err:
2733 	spin_unlock_bh(&adapter->mcc_lock);
2734 	return status;
2735 }
2736 
2737 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2738 			    struct be_dma_mem *nonemb_cmd)
2739 {
2740 	struct be_mcc_wrb *wrb;
2741 	struct be_cmd_req_seeprom_read *req;
2742 	int status;
2743 
2744 	spin_lock_bh(&adapter->mcc_lock);
2745 
2746 	wrb = wrb_from_mccq(adapter);
2747 	if (!wrb) {
2748 		status = -EBUSY;
2749 		goto err;
2750 	}
2751 	req = nonemb_cmd->va;
2752 
2753 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2754 			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2755 			       nonemb_cmd);
2756 
2757 	status = be_mcc_notify_wait(adapter);
2758 
2759 err:
2760 	spin_unlock_bh(&adapter->mcc_lock);
2761 	return status;
2762 }
2763 
2764 int be_cmd_get_phy_info(struct be_adapter *adapter)
2765 {
2766 	struct be_mcc_wrb *wrb;
2767 	struct be_cmd_req_get_phy_info *req;
2768 	struct be_dma_mem cmd;
2769 	int status;
2770 
2771 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2772 			    CMD_SUBSYSTEM_COMMON))
2773 		return -EPERM;
2774 
2775 	spin_lock_bh(&adapter->mcc_lock);
2776 
2777 	wrb = wrb_from_mccq(adapter);
2778 	if (!wrb) {
2779 		status = -EBUSY;
2780 		goto err;
2781 	}
2782 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2783 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
2784 	if (!cmd.va) {
2785 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2786 		status = -ENOMEM;
2787 		goto err;
2788 	}
2789 
2790 	req = cmd.va;
2791 
2792 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2793 			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2794 			       wrb, &cmd);
2795 
2796 	status = be_mcc_notify_wait(adapter);
2797 	if (!status) {
2798 		struct be_phy_info *resp_phy_info =
2799 				cmd.va + sizeof(struct be_cmd_req_hdr);
2800 
2801 		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2802 		adapter->phy.interface_type =
2803 			le16_to_cpu(resp_phy_info->interface_type);
2804 		adapter->phy.auto_speeds_supported =
2805 			le16_to_cpu(resp_phy_info->auto_speeds_supported);
2806 		adapter->phy.fixed_speeds_supported =
2807 			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2808 		adapter->phy.misc_params =
2809 			le32_to_cpu(resp_phy_info->misc_params);
2810 
2811 		if (BE2_chip(adapter)) {
2812 			adapter->phy.fixed_speeds_supported =
2813 				BE_SUPPORTED_SPEED_10GBPS |
2814 				BE_SUPPORTED_SPEED_1GBPS;
2815 		}
2816 	}
2817 	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2818 err:
2819 	spin_unlock_bh(&adapter->mcc_lock);
2820 	return status;
2821 }
2822 
2823 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2824 {
2825 	struct be_mcc_wrb *wrb;
2826 	struct be_cmd_req_set_qos *req;
2827 	int status;
2828 
2829 	spin_lock_bh(&adapter->mcc_lock);
2830 
2831 	wrb = wrb_from_mccq(adapter);
2832 	if (!wrb) {
2833 		status = -EBUSY;
2834 		goto err;
2835 	}
2836 
2837 	req = embedded_payload(wrb);
2838 
2839 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2840 			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2841 
2842 	req->hdr.domain = domain;
2843 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2844 	req->max_bps_nic = cpu_to_le32(bps);
2845 
2846 	status = be_mcc_notify_wait(adapter);
2847 
2848 err:
2849 	spin_unlock_bh(&adapter->mcc_lock);
2850 	return status;
2851 }
2852 
2853 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2854 {
2855 	struct be_mcc_wrb *wrb;
2856 	struct be_cmd_req_cntl_attribs *req;
2857 	struct be_cmd_resp_cntl_attribs *resp;
2858 	int status;
2859 	int payload_len = max(sizeof(*req), sizeof(*resp));
2860 	struct mgmt_controller_attrib *attribs;
2861 	struct be_dma_mem attribs_cmd;
2862 
2863 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2864 		return -1;
2865 
2866 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2867 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2868 	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2869 					      &attribs_cmd.dma);
2870 	if (!attribs_cmd.va) {
2871 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
2872 		status = -ENOMEM;
2873 		goto err;
2874 	}
2875 
2876 	wrb = wrb_from_mbox(adapter);
2877 	if (!wrb) {
2878 		status = -EBUSY;
2879 		goto err;
2880 	}
2881 	req = attribs_cmd.va;
2882 
2883 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2884 			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
2885 			       wrb, &attribs_cmd);
2886 
2887 	status = be_mbox_notify_wait(adapter);
2888 	if (!status) {
2889 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2890 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
2891 	}
2892 
2893 err:
2894 	mutex_unlock(&adapter->mbox_lock);
2895 	if (attribs_cmd.va)
2896 		pci_free_consistent(adapter->pdev, attribs_cmd.size,
2897 				    attribs_cmd.va, attribs_cmd.dma);
2898 	return status;
2899 }
2900 
2901 /* Uses mbox */
2902 int be_cmd_req_native_mode(struct be_adapter *adapter)
2903 {
2904 	struct be_mcc_wrb *wrb;
2905 	struct be_cmd_req_set_func_cap *req;
2906 	int status;
2907 
2908 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2909 		return -1;
2910 
2911 	wrb = wrb_from_mbox(adapter);
2912 	if (!wrb) {
2913 		status = -EBUSY;
2914 		goto err;
2915 	}
2916 
2917 	req = embedded_payload(wrb);
2918 
2919 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2920 			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
2921 			       sizeof(*req), wrb, NULL);
2922 
2923 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2924 				CAPABILITY_BE3_NATIVE_ERX_API);
2925 	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2926 
2927 	status = be_mbox_notify_wait(adapter);
2928 	if (!status) {
2929 		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2930 
2931 		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2932 					CAPABILITY_BE3_NATIVE_ERX_API;
2933 		if (!adapter->be3_native)
2934 			dev_warn(&adapter->pdev->dev,
2935 				 "adapter not in advanced mode\n");
2936 	}
2937 err:
2938 	mutex_unlock(&adapter->mbox_lock);
2939 	return status;
2940 }
2941 
2942 /* Get privilege(s) for a function */
2943 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2944 			     u32 domain)
2945 {
2946 	struct be_mcc_wrb *wrb;
2947 	struct be_cmd_req_get_fn_privileges *req;
2948 	int status;
2949 
2950 	spin_lock_bh(&adapter->mcc_lock);
2951 
2952 	wrb = wrb_from_mccq(adapter);
2953 	if (!wrb) {
2954 		status = -EBUSY;
2955 		goto err;
2956 	}
2957 
2958 	req = embedded_payload(wrb);
2959 
2960 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2961 			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2962 			       wrb, NULL);
2963 
2964 	req->hdr.domain = domain;
2965 
2966 	status = be_mcc_notify_wait(adapter);
2967 	if (!status) {
2968 		struct be_cmd_resp_get_fn_privileges *resp =
2969 						embedded_payload(wrb);
2970 
2971 		*privilege = le32_to_cpu(resp->privilege_mask);
2972 
2973 		/* In UMC mode FW does not return right privileges.
2974 		 * Override with correct privilege equivalent to PF.
2975 		 */
2976 		if (BEx_chip(adapter) && be_is_mc(adapter) &&
2977 		    be_physfn(adapter))
2978 			*privilege = MAX_PRIVILEGES;
2979 	}
2980 
2981 err:
2982 	spin_unlock_bh(&adapter->mcc_lock);
2983 	return status;
2984 }
2985 
2986 /* Set privilege(s) for a function */
2987 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2988 			     u32 domain)
2989 {
2990 	struct be_mcc_wrb *wrb;
2991 	struct be_cmd_req_set_fn_privileges *req;
2992 	int status;
2993 
2994 	spin_lock_bh(&adapter->mcc_lock);
2995 
2996 	wrb = wrb_from_mccq(adapter);
2997 	if (!wrb) {
2998 		status = -EBUSY;
2999 		goto err;
3000 	}
3001 
3002 	req = embedded_payload(wrb);
3003 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3004 			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
3005 			       wrb, NULL);
3006 	req->hdr.domain = domain;
3007 	if (lancer_chip(adapter))
3008 		req->privileges_lancer = cpu_to_le32(privileges);
3009 	else
3010 		req->privileges = cpu_to_le32(privileges);
3011 
3012 	status = be_mcc_notify_wait(adapter);
3013 err:
3014 	spin_unlock_bh(&adapter->mcc_lock);
3015 	return status;
3016 }
3017 
3018 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
3019  * pmac_id_valid: false => pmac_id or MAC address is requested.
3020  *		  If pmac_id is returned, pmac_id_valid is returned as true
3021  */
3022 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3023 			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
3024 			     u8 domain)
3025 {
3026 	struct be_mcc_wrb *wrb;
3027 	struct be_cmd_req_get_mac_list *req;
3028 	int status;
3029 	int mac_count;
3030 	struct be_dma_mem get_mac_list_cmd;
3031 	int i;
3032 
3033 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3034 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
3035 	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
3036 						   get_mac_list_cmd.size,
3037 						   &get_mac_list_cmd.dma);
3038 
3039 	if (!get_mac_list_cmd.va) {
3040 		dev_err(&adapter->pdev->dev,
3041 			"Memory allocation failure during GET_MAC_LIST\n");
3042 		return -ENOMEM;
3043 	}
3044 
3045 	spin_lock_bh(&adapter->mcc_lock);
3046 
3047 	wrb = wrb_from_mccq(adapter);
3048 	if (!wrb) {
3049 		status = -EBUSY;
3050 		goto out;
3051 	}
3052 
3053 	req = get_mac_list_cmd.va;
3054 
3055 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3056 			       OPCODE_COMMON_GET_MAC_LIST,
3057 			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
3058 	req->hdr.domain = domain;
3059 	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
3060 	if (*pmac_id_valid) {
3061 		req->mac_id = cpu_to_le32(*pmac_id);
3062 		req->iface_id = cpu_to_le16(if_handle);
3063 		req->perm_override = 0;
3064 	} else {
3065 		req->perm_override = 1;
3066 	}
3067 
3068 	status = be_mcc_notify_wait(adapter);
3069 	if (!status) {
3070 		struct be_cmd_resp_get_mac_list *resp =
3071 						get_mac_list_cmd.va;
3072 
3073 		if (*pmac_id_valid) {
3074 			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3075 			       ETH_ALEN);
3076 			goto out;
3077 		}
3078 
3079 		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3080 		/* Mac list returned could contain one or more active mac_ids
3081 		 * or one or more true or pseudo permanant mac addresses.
3082 		 * If an active mac_id is present, return first active mac_id
3083 		 * found.
3084 		 */
3085 		for (i = 0; i < mac_count; i++) {
3086 			struct get_list_macaddr *mac_entry;
3087 			u16 mac_addr_size;
3088 			u32 mac_id;
3089 
3090 			mac_entry = &resp->macaddr_list[i];
3091 			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3092 			/* mac_id is a 32 bit value and mac_addr size
3093 			 * is 6 bytes
3094 			 */
3095 			if (mac_addr_size == sizeof(u32)) {
3096 				*pmac_id_valid = true;
3097 				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3098 				*pmac_id = le32_to_cpu(mac_id);
3099 				goto out;
3100 			}
3101 		}
3102 		/* If no active mac_id found, return first mac addr */
3103 		*pmac_id_valid = false;
3104 		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3105 		       ETH_ALEN);
3106 	}
3107 
3108 out:
3109 	spin_unlock_bh(&adapter->mcc_lock);
3110 	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
3111 			    get_mac_list_cmd.va, get_mac_list_cmd.dma);
3112 	return status;
3113 }
3114 
3115 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3116 			  u8 *mac, u32 if_handle, bool active, u32 domain)
3117 {
3118 	if (!active)
3119 		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3120 					 if_handle, domain);
3121 	if (BEx_chip(adapter))
3122 		return be_cmd_mac_addr_query(adapter, mac, false,
3123 					     if_handle, curr_pmac_id);
3124 	else
3125 		/* Fetch the MAC address using pmac_id */
3126 		return be_cmd_get_mac_from_list(adapter, mac, &active,
3127 						&curr_pmac_id,
3128 						if_handle, domain);
3129 }
3130 
3131 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3132 {
3133 	int status;
3134 	bool pmac_valid = false;
3135 
3136 	memset(mac, 0, ETH_ALEN);
3137 
3138 	if (BEx_chip(adapter)) {
3139 		if (be_physfn(adapter))
3140 			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3141 						       0);
3142 		else
3143 			status = be_cmd_mac_addr_query(adapter, mac, false,
3144 						       adapter->if_handle, 0);
3145 	} else {
3146 		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3147 						  NULL, adapter->if_handle, 0);
3148 	}
3149 
3150 	return status;
3151 }
3152 
3153 /* Uses synchronous MCCQ */
3154 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3155 			u8 mac_count, u32 domain)
3156 {
3157 	struct be_mcc_wrb *wrb;
3158 	struct be_cmd_req_set_mac_list *req;
3159 	int status;
3160 	struct be_dma_mem cmd;
3161 
3162 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3163 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3164 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
3165 				    &cmd.dma, GFP_KERNEL);
3166 	if (!cmd.va)
3167 		return -ENOMEM;
3168 
3169 	spin_lock_bh(&adapter->mcc_lock);
3170 
3171 	wrb = wrb_from_mccq(adapter);
3172 	if (!wrb) {
3173 		status = -EBUSY;
3174 		goto err;
3175 	}
3176 
3177 	req = cmd.va;
3178 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3179 			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3180 			       wrb, &cmd);
3181 
3182 	req->hdr.domain = domain;
3183 	req->mac_count = mac_count;
3184 	if (mac_count)
3185 		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3186 
3187 	status = be_mcc_notify_wait(adapter);
3188 
3189 err:
3190 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3191 	spin_unlock_bh(&adapter->mcc_lock);
3192 	return status;
3193 }
3194 
3195 /* Wrapper to delete any active MACs and provision the new mac.
3196  * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3197  * current list are active.
3198  */
3199 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3200 {
3201 	bool active_mac = false;
3202 	u8 old_mac[ETH_ALEN];
3203 	u32 pmac_id;
3204 	int status;
3205 
3206 	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3207 					  &pmac_id, if_id, dom);
3208 
3209 	if (!status && active_mac)
3210 		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3211 
3212 	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3213 }
3214 
3215 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3216 			  u32 domain, u16 intf_id, u16 hsw_mode)
3217 {
3218 	struct be_mcc_wrb *wrb;
3219 	struct be_cmd_req_set_hsw_config *req;
3220 	void *ctxt;
3221 	int status;
3222 
3223 	spin_lock_bh(&adapter->mcc_lock);
3224 
3225 	wrb = wrb_from_mccq(adapter);
3226 	if (!wrb) {
3227 		status = -EBUSY;
3228 		goto err;
3229 	}
3230 
3231 	req = embedded_payload(wrb);
3232 	ctxt = &req->context;
3233 
3234 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3235 			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3236 			       NULL);
3237 
3238 	req->hdr.domain = domain;
3239 	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3240 	if (pvid) {
3241 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3242 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3243 	}
3244 	if (!BEx_chip(adapter) && hsw_mode) {
3245 		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3246 			      ctxt, adapter->hba_port_num);
3247 		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3248 		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3249 			      ctxt, hsw_mode);
3250 	}
3251 
3252 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3253 	status = be_mcc_notify_wait(adapter);
3254 
3255 err:
3256 	spin_unlock_bh(&adapter->mcc_lock);
3257 	return status;
3258 }
3259 
3260 /* Get Hyper switch config */
3261 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3262 			  u32 domain, u16 intf_id, u8 *mode)
3263 {
3264 	struct be_mcc_wrb *wrb;
3265 	struct be_cmd_req_get_hsw_config *req;
3266 	void *ctxt;
3267 	int status;
3268 	u16 vid;
3269 
3270 	spin_lock_bh(&adapter->mcc_lock);
3271 
3272 	wrb = wrb_from_mccq(adapter);
3273 	if (!wrb) {
3274 		status = -EBUSY;
3275 		goto err;
3276 	}
3277 
3278 	req = embedded_payload(wrb);
3279 	ctxt = &req->context;
3280 
3281 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3282 			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3283 			       NULL);
3284 
3285 	req->hdr.domain = domain;
3286 	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3287 		      ctxt, intf_id);
3288 	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3289 
3290 	if (!BEx_chip(adapter) && mode) {
3291 		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3292 			      ctxt, adapter->hba_port_num);
3293 		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3294 	}
3295 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3296 
3297 	status = be_mcc_notify_wait(adapter);
3298 	if (!status) {
3299 		struct be_cmd_resp_get_hsw_config *resp =
3300 						embedded_payload(wrb);
3301 
3302 		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3303 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3304 				    pvid, &resp->context);
3305 		if (pvid)
3306 			*pvid = le16_to_cpu(vid);
3307 		if (mode)
3308 			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3309 					      port_fwd_type, &resp->context);
3310 	}
3311 
3312 err:
3313 	spin_unlock_bh(&adapter->mcc_lock);
3314 	return status;
3315 }
3316 
3317 static bool be_is_wol_excluded(struct be_adapter *adapter)
3318 {
3319 	struct pci_dev *pdev = adapter->pdev;
3320 
3321 	if (!be_physfn(adapter))
3322 		return true;
3323 
3324 	switch (pdev->subsystem_device) {
3325 	case OC_SUBSYS_DEVICE_ID1:
3326 	case OC_SUBSYS_DEVICE_ID2:
3327 	case OC_SUBSYS_DEVICE_ID3:
3328 	case OC_SUBSYS_DEVICE_ID4:
3329 		return true;
3330 	default:
3331 		return false;
3332 	}
3333 }
3334 
3335 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3336 {
3337 	struct be_mcc_wrb *wrb;
3338 	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3339 	int status = 0;
3340 	struct be_dma_mem cmd;
3341 
3342 	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3343 			    CMD_SUBSYSTEM_ETH))
3344 		return -EPERM;
3345 
3346 	if (be_is_wol_excluded(adapter))
3347 		return status;
3348 
3349 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3350 		return -1;
3351 
3352 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3353 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3354 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3355 	if (!cmd.va) {
3356 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3357 		status = -ENOMEM;
3358 		goto err;
3359 	}
3360 
3361 	wrb = wrb_from_mbox(adapter);
3362 	if (!wrb) {
3363 		status = -EBUSY;
3364 		goto err;
3365 	}
3366 
3367 	req = cmd.va;
3368 
3369 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3370 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3371 			       sizeof(*req), wrb, &cmd);
3372 
3373 	req->hdr.version = 1;
3374 	req->query_options = BE_GET_WOL_CAP;
3375 
3376 	status = be_mbox_notify_wait(adapter);
3377 	if (!status) {
3378 		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3379 
3380 		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
3381 
3382 		adapter->wol_cap = resp->wol_settings;
3383 		if (adapter->wol_cap & BE_WOL_CAP)
3384 			adapter->wol_en = true;
3385 	}
3386 err:
3387 	mutex_unlock(&adapter->mbox_lock);
3388 	if (cmd.va)
3389 		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3390 	return status;
3391 
3392 }
3393 
3394 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
3395 {
3396 	struct be_dma_mem extfat_cmd;
3397 	struct be_fat_conf_params *cfgs;
3398 	int status;
3399 	int i, j;
3400 
3401 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3402 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3403 	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3404 					     &extfat_cmd.dma);
3405 	if (!extfat_cmd.va)
3406 		return -ENOMEM;
3407 
3408 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3409 	if (status)
3410 		goto err;
3411 
3412 	cfgs = (struct be_fat_conf_params *)
3413 			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
3414 	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
3415 		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
3416 
3417 		for (j = 0; j < num_modes; j++) {
3418 			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
3419 				cfgs->module[i].trace_lvl[j].dbg_lvl =
3420 							cpu_to_le32(level);
3421 		}
3422 	}
3423 
3424 	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
3425 err:
3426 	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3427 			    extfat_cmd.dma);
3428 	return status;
3429 }
3430 
3431 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
3432 {
3433 	struct be_dma_mem extfat_cmd;
3434 	struct be_fat_conf_params *cfgs;
3435 	int status, j;
3436 	int level = 0;
3437 
3438 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3439 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3440 	extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3441 					     &extfat_cmd.dma);
3442 
3443 	if (!extfat_cmd.va) {
3444 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3445 			__func__);
3446 		goto err;
3447 	}
3448 
3449 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3450 	if (!status) {
3451 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3452 						sizeof(struct be_cmd_resp_hdr));
3453 
3454 		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3455 			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3456 				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3457 		}
3458 	}
3459 	pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3460 			    extfat_cmd.dma);
3461 err:
3462 	return level;
3463 }
3464 
3465 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3466 				   struct be_dma_mem *cmd)
3467 {
3468 	struct be_mcc_wrb *wrb;
3469 	struct be_cmd_req_get_ext_fat_caps *req;
3470 	int status;
3471 
3472 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3473 		return -1;
3474 
3475 	wrb = wrb_from_mbox(adapter);
3476 	if (!wrb) {
3477 		status = -EBUSY;
3478 		goto err;
3479 	}
3480 
3481 	req = cmd->va;
3482 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3483 			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3484 			       cmd->size, wrb, cmd);
3485 	req->parameter_type = cpu_to_le32(1);
3486 
3487 	status = be_mbox_notify_wait(adapter);
3488 err:
3489 	mutex_unlock(&adapter->mbox_lock);
3490 	return status;
3491 }
3492 
3493 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3494 				   struct be_dma_mem *cmd,
3495 				   struct be_fat_conf_params *configs)
3496 {
3497 	struct be_mcc_wrb *wrb;
3498 	struct be_cmd_req_set_ext_fat_caps *req;
3499 	int status;
3500 
3501 	spin_lock_bh(&adapter->mcc_lock);
3502 
3503 	wrb = wrb_from_mccq(adapter);
3504 	if (!wrb) {
3505 		status = -EBUSY;
3506 		goto err;
3507 	}
3508 
3509 	req = cmd->va;
3510 	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3511 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3512 			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3513 			       cmd->size, wrb, cmd);
3514 
3515 	status = be_mcc_notify_wait(adapter);
3516 err:
3517 	spin_unlock_bh(&adapter->mcc_lock);
3518 	return status;
3519 }
3520 
3521 int be_cmd_query_port_name(struct be_adapter *adapter)
3522 {
3523 	struct be_cmd_req_get_port_name *req;
3524 	struct be_mcc_wrb *wrb;
3525 	int status;
3526 
3527 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3528 		return -1;
3529 
3530 	wrb = wrb_from_mbox(adapter);
3531 	req = embedded_payload(wrb);
3532 
3533 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3534 			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3535 			       NULL);
3536 	if (!BEx_chip(adapter))
3537 		req->hdr.version = 1;
3538 
3539 	status = be_mbox_notify_wait(adapter);
3540 	if (!status) {
3541 		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3542 
3543 		adapter->port_name = resp->port_name[adapter->hba_port_num];
3544 	} else {
3545 		adapter->port_name = adapter->hba_port_num + '0';
3546 	}
3547 
3548 	mutex_unlock(&adapter->mbox_lock);
3549 	return status;
3550 }
3551 
3552 /* Descriptor type */
3553 enum {
3554 	FUNC_DESC = 1,
3555 	VFT_DESC = 2
3556 };
3557 
3558 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
3559 					       int desc_type)
3560 {
3561 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3562 	struct be_nic_res_desc *nic;
3563 	int i;
3564 
3565 	for (i = 0; i < desc_count; i++) {
3566 		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3567 		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
3568 			nic = (struct be_nic_res_desc *)hdr;
3569 			if (desc_type == FUNC_DESC ||
3570 			    (desc_type == VFT_DESC &&
3571 			     nic->flags & (1 << VFT_SHIFT)))
3572 				return nic;
3573 		}
3574 
3575 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3576 		hdr = (void *)hdr + hdr->desc_len;
3577 	}
3578 	return NULL;
3579 }
3580 
3581 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
3582 {
3583 	return be_get_nic_desc(buf, desc_count, VFT_DESC);
3584 }
3585 
3586 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
3587 {
3588 	return be_get_nic_desc(buf, desc_count, FUNC_DESC);
3589 }
3590 
3591 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3592 						 u32 desc_count)
3593 {
3594 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3595 	struct be_pcie_res_desc *pcie;
3596 	int i;
3597 
3598 	for (i = 0; i < desc_count; i++) {
3599 		if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3600 		     hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3601 			pcie = (struct be_pcie_res_desc	*)hdr;
3602 			if (pcie->pf_num == devfn)
3603 				return pcie;
3604 		}
3605 
3606 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3607 		hdr = (void *)hdr + hdr->desc_len;
3608 	}
3609 	return NULL;
3610 }
3611 
3612 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
3613 {
3614 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3615 	int i;
3616 
3617 	for (i = 0; i < desc_count; i++) {
3618 		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
3619 			return (struct be_port_res_desc *)hdr;
3620 
3621 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3622 		hdr = (void *)hdr + hdr->desc_len;
3623 	}
3624 	return NULL;
3625 }
3626 
3627 static void be_copy_nic_desc(struct be_resources *res,
3628 			     struct be_nic_res_desc *desc)
3629 {
3630 	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3631 	res->max_vlans = le16_to_cpu(desc->vlan_count);
3632 	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3633 	res->max_tx_qs = le16_to_cpu(desc->txq_count);
3634 	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3635 	res->max_rx_qs = le16_to_cpu(desc->rq_count);
3636 	res->max_evt_qs = le16_to_cpu(desc->eq_count);
3637 	/* Clear flags that driver is not interested in */
3638 	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3639 				BE_IF_CAP_FLAGS_WANT;
3640 	/* Need 1 RXQ as the default RXQ */
3641 	if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3642 		res->max_rss_qs -= 1;
3643 }
3644 
3645 /* Uses Mbox */
3646 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3647 {
3648 	struct be_mcc_wrb *wrb;
3649 	struct be_cmd_req_get_func_config *req;
3650 	int status;
3651 	struct be_dma_mem cmd;
3652 
3653 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3654 		return -1;
3655 
3656 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3657 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3658 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3659 	if (!cmd.va) {
3660 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3661 		status = -ENOMEM;
3662 		goto err;
3663 	}
3664 
3665 	wrb = wrb_from_mbox(adapter);
3666 	if (!wrb) {
3667 		status = -EBUSY;
3668 		goto err;
3669 	}
3670 
3671 	req = cmd.va;
3672 
3673 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3674 			       OPCODE_COMMON_GET_FUNC_CONFIG,
3675 			       cmd.size, wrb, &cmd);
3676 
3677 	if (skyhawk_chip(adapter))
3678 		req->hdr.version = 1;
3679 
3680 	status = be_mbox_notify_wait(adapter);
3681 	if (!status) {
3682 		struct be_cmd_resp_get_func_config *resp = cmd.va;
3683 		u32 desc_count = le32_to_cpu(resp->desc_count);
3684 		struct be_nic_res_desc *desc;
3685 
3686 		desc = be_get_func_nic_desc(resp->func_param, desc_count);
3687 		if (!desc) {
3688 			status = -EINVAL;
3689 			goto err;
3690 		}
3691 
3692 		adapter->pf_number = desc->pf_num;
3693 		be_copy_nic_desc(res, desc);
3694 	}
3695 err:
3696 	mutex_unlock(&adapter->mbox_lock);
3697 	if (cmd.va)
3698 		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3699 	return status;
3700 }
3701 
3702 /* Will use MBOX only if MCCQ has not been created */
3703 int be_cmd_get_profile_config(struct be_adapter *adapter,
3704 			      struct be_resources *res, u8 domain)
3705 {
3706 	struct be_cmd_resp_get_profile_config *resp;
3707 	struct be_cmd_req_get_profile_config *req;
3708 	struct be_nic_res_desc *vf_res;
3709 	struct be_pcie_res_desc *pcie;
3710 	struct be_port_res_desc *port;
3711 	struct be_nic_res_desc *nic;
3712 	struct be_mcc_wrb wrb = {0};
3713 	struct be_dma_mem cmd;
3714 	u32 desc_count;
3715 	int status;
3716 
3717 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3718 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3719 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3720 	if (!cmd.va)
3721 		return -ENOMEM;
3722 
3723 	req = cmd.va;
3724 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3725 			       OPCODE_COMMON_GET_PROFILE_CONFIG,
3726 			       cmd.size, &wrb, &cmd);
3727 
3728 	req->hdr.domain = domain;
3729 	if (!lancer_chip(adapter))
3730 		req->hdr.version = 1;
3731 	req->type = ACTIVE_PROFILE_TYPE;
3732 
3733 	status = be_cmd_notify_wait(adapter, &wrb);
3734 	if (status)
3735 		goto err;
3736 
3737 	resp = cmd.va;
3738 	desc_count = le32_to_cpu(resp->desc_count);
3739 
3740 	pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3741 				desc_count);
3742 	if (pcie)
3743 		res->max_vfs = le16_to_cpu(pcie->num_vfs);
3744 
3745 	port = be_get_port_desc(resp->func_param, desc_count);
3746 	if (port)
3747 		adapter->mc_type = port->mc_type;
3748 
3749 	nic = be_get_func_nic_desc(resp->func_param, desc_count);
3750 	if (nic)
3751 		be_copy_nic_desc(res, nic);
3752 
3753 	vf_res = be_get_vft_desc(resp->func_param, desc_count);
3754 	if (vf_res)
3755 		res->vf_if_cap_flags = vf_res->cap_flags;
3756 err:
3757 	if (cmd.va)
3758 		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3759 	return status;
3760 }
3761 
3762 /* Will use MBOX only if MCCQ has not been created */
3763 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
3764 				     int size, int count, u8 version, u8 domain)
3765 {
3766 	struct be_cmd_req_set_profile_config *req;
3767 	struct be_mcc_wrb wrb = {0};
3768 	struct be_dma_mem cmd;
3769 	int status;
3770 
3771 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3772 	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
3773 	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3774 	if (!cmd.va)
3775 		return -ENOMEM;
3776 
3777 	req = cmd.va;
3778 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3779 			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
3780 			       &wrb, &cmd);
3781 	req->hdr.version = version;
3782 	req->hdr.domain = domain;
3783 	req->desc_count = cpu_to_le32(count);
3784 	memcpy(req->desc, desc, size);
3785 
3786 	status = be_cmd_notify_wait(adapter, &wrb);
3787 
3788 	if (cmd.va)
3789 		pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3790 	return status;
3791 }
3792 
3793 /* Mark all fields invalid */
3794 static void be_reset_nic_desc(struct be_nic_res_desc *nic)
3795 {
3796 	memset(nic, 0, sizeof(*nic));
3797 	nic->unicast_mac_count = 0xFFFF;
3798 	nic->mcc_count = 0xFFFF;
3799 	nic->vlan_count = 0xFFFF;
3800 	nic->mcast_mac_count = 0xFFFF;
3801 	nic->txq_count = 0xFFFF;
3802 	nic->rq_count = 0xFFFF;
3803 	nic->rssq_count = 0xFFFF;
3804 	nic->lro_count = 0xFFFF;
3805 	nic->cq_count = 0xFFFF;
3806 	nic->toe_conn_count = 0xFFFF;
3807 	nic->eq_count = 0xFFFF;
3808 	nic->iface_count = 0xFFFF;
3809 	nic->link_param = 0xFF;
3810 	nic->channel_id_param = cpu_to_le16(0xF000);
3811 	nic->acpi_params = 0xFF;
3812 	nic->wol_param = 0x0F;
3813 	nic->tunnel_iface_count = 0xFFFF;
3814 	nic->direct_tenant_iface_count = 0xFFFF;
3815 	nic->bw_min = 0xFFFFFFFF;
3816 	nic->bw_max = 0xFFFFFFFF;
3817 }
3818 
3819 /* Mark all fields invalid */
3820 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
3821 {
3822 	memset(pcie, 0, sizeof(*pcie));
3823 	pcie->sriov_state = 0xFF;
3824 	pcie->pf_state = 0xFF;
3825 	pcie->pf_type = 0xFF;
3826 	pcie->num_vfs = 0xFFFF;
3827 }
3828 
3829 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
3830 		      u8 domain)
3831 {
3832 	struct be_nic_res_desc nic_desc;
3833 	u32 bw_percent;
3834 	u16 version = 0;
3835 
3836 	if (BE3_chip(adapter))
3837 		return be_cmd_set_qos(adapter, max_rate / 10, domain);
3838 
3839 	be_reset_nic_desc(&nic_desc);
3840 	nic_desc.pf_num = adapter->pf_number;
3841 	nic_desc.vf_num = domain;
3842 	nic_desc.bw_min = 0;
3843 	if (lancer_chip(adapter)) {
3844 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3845 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3846 		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
3847 					(1 << NOSV_SHIFT);
3848 		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
3849 	} else {
3850 		version = 1;
3851 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3852 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3853 		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3854 		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
3855 		nic_desc.bw_max = cpu_to_le32(bw_percent);
3856 	}
3857 
3858 	return be_cmd_set_profile_config(adapter, &nic_desc,
3859 					 nic_desc.hdr.desc_len,
3860 					 1, version, domain);
3861 }
3862 
3863 int be_cmd_set_sriov_config(struct be_adapter *adapter,
3864 			    struct be_resources res, u16 num_vfs)
3865 {
3866 	struct {
3867 		struct be_pcie_res_desc pcie;
3868 		struct be_nic_res_desc nic_vft;
3869 	} __packed desc;
3870 	u16 vf_q_count;
3871 
3872 	if (BEx_chip(adapter) || lancer_chip(adapter))
3873 		return 0;
3874 
3875 	/* PF PCIE descriptor */
3876 	be_reset_pcie_desc(&desc.pcie);
3877 	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
3878 	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3879 	desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3880 	desc.pcie.pf_num = adapter->pdev->devfn;
3881 	desc.pcie.sriov_state = num_vfs ? 1 : 0;
3882 	desc.pcie.num_vfs = cpu_to_le16(num_vfs);
3883 
3884 	/* VF NIC Template descriptor */
3885 	be_reset_nic_desc(&desc.nic_vft);
3886 	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
3887 	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3888 	desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
3889 				(1 << NOSV_SHIFT);
3890 	desc.nic_vft.pf_num = adapter->pdev->devfn;
3891 	desc.nic_vft.vf_num = 0;
3892 
3893 	if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3894 		/* If number of VFs requested is 8 less than max supported,
3895 		 * assign 8 queue pairs to the PF and divide the remaining
3896 		 * resources evenly among the VFs
3897 		 */
3898 		if (num_vfs < (be_max_vfs(adapter) - 8))
3899 			vf_q_count = (res.max_rss_qs - 8) / num_vfs;
3900 		else
3901 			vf_q_count = res.max_rss_qs / num_vfs;
3902 
3903 		desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
3904 		desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
3905 		desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
3906 		desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
3907 	} else {
3908 		desc.nic_vft.txq_count = cpu_to_le16(1);
3909 		desc.nic_vft.rq_count = cpu_to_le16(1);
3910 		desc.nic_vft.rssq_count = cpu_to_le16(0);
3911 		/* One CQ for each TX, RX and MCCQ */
3912 		desc.nic_vft.cq_count = cpu_to_le16(3);
3913 	}
3914 
3915 	return be_cmd_set_profile_config(adapter, &desc,
3916 					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
3917 }
3918 
3919 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
3920 {
3921 	struct be_mcc_wrb *wrb;
3922 	struct be_cmd_req_manage_iface_filters *req;
3923 	int status;
3924 
3925 	if (iface == 0xFFFFFFFF)
3926 		return -1;
3927 
3928 	spin_lock_bh(&adapter->mcc_lock);
3929 
3930 	wrb = wrb_from_mccq(adapter);
3931 	if (!wrb) {
3932 		status = -EBUSY;
3933 		goto err;
3934 	}
3935 	req = embedded_payload(wrb);
3936 
3937 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3938 			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
3939 			       wrb, NULL);
3940 	req->op = op;
3941 	req->target_iface_id = cpu_to_le32(iface);
3942 
3943 	status = be_mcc_notify_wait(adapter);
3944 err:
3945 	spin_unlock_bh(&adapter->mcc_lock);
3946 	return status;
3947 }
3948 
3949 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
3950 {
3951 	struct be_port_res_desc port_desc;
3952 
3953 	memset(&port_desc, 0, sizeof(port_desc));
3954 	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
3955 	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
3956 	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
3957 	port_desc.link_num = adapter->hba_port_num;
3958 	if (port) {
3959 		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
3960 					(1 << RCVID_SHIFT);
3961 		port_desc.nv_port = swab16(port);
3962 	} else {
3963 		port_desc.nv_flags = NV_TYPE_DISABLED;
3964 		port_desc.nv_port = 0;
3965 	}
3966 
3967 	return be_cmd_set_profile_config(adapter, &port_desc,
3968 					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
3969 }
3970 
3971 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3972 		     int vf_num)
3973 {
3974 	struct be_mcc_wrb *wrb;
3975 	struct be_cmd_req_get_iface_list *req;
3976 	struct be_cmd_resp_get_iface_list *resp;
3977 	int status;
3978 
3979 	spin_lock_bh(&adapter->mcc_lock);
3980 
3981 	wrb = wrb_from_mccq(adapter);
3982 	if (!wrb) {
3983 		status = -EBUSY;
3984 		goto err;
3985 	}
3986 	req = embedded_payload(wrb);
3987 
3988 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3989 			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3990 			       wrb, NULL);
3991 	req->hdr.domain = vf_num + 1;
3992 
3993 	status = be_mcc_notify_wait(adapter);
3994 	if (!status) {
3995 		resp = (struct be_cmd_resp_get_iface_list *)req;
3996 		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3997 	}
3998 
3999 err:
4000 	spin_unlock_bh(&adapter->mcc_lock);
4001 	return status;
4002 }
4003 
4004 static int lancer_wait_idle(struct be_adapter *adapter)
4005 {
4006 #define SLIPORT_IDLE_TIMEOUT 30
4007 	u32 reg_val;
4008 	int status = 0, i;
4009 
4010 	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
4011 		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
4012 		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4013 			break;
4014 
4015 		ssleep(1);
4016 	}
4017 
4018 	if (i == SLIPORT_IDLE_TIMEOUT)
4019 		status = -1;
4020 
4021 	return status;
4022 }
4023 
4024 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4025 {
4026 	int status = 0;
4027 
4028 	status = lancer_wait_idle(adapter);
4029 	if (status)
4030 		return status;
4031 
4032 	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4033 
4034 	return status;
4035 }
4036 
4037 /* Routine to check whether dump image is present or not */
4038 bool dump_present(struct be_adapter *adapter)
4039 {
4040 	u32 sliport_status = 0;
4041 
4042 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4043 	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4044 }
4045 
4046 int lancer_initiate_dump(struct be_adapter *adapter)
4047 {
4048 	struct device *dev = &adapter->pdev->dev;
4049 	int status;
4050 
4051 	if (dump_present(adapter)) {
4052 		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4053 		return -EEXIST;
4054 	}
4055 
4056 	/* give firmware reset and diagnostic dump */
4057 	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4058 				     PHYSDEV_CONTROL_DD_MASK);
4059 	if (status < 0) {
4060 		dev_err(dev, "FW reset failed\n");
4061 		return status;
4062 	}
4063 
4064 	status = lancer_wait_idle(adapter);
4065 	if (status)
4066 		return status;
4067 
4068 	if (!dump_present(adapter)) {
4069 		dev_err(dev, "FW dump not generated\n");
4070 		return -EIO;
4071 	}
4072 
4073 	return 0;
4074 }
4075 
4076 int lancer_delete_dump(struct be_adapter *adapter)
4077 {
4078 	int status;
4079 
4080 	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4081 	return be_cmd_status(status);
4082 }
4083 
4084 /* Uses sync mcc */
4085 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4086 {
4087 	struct be_mcc_wrb *wrb;
4088 	struct be_cmd_enable_disable_vf *req;
4089 	int status;
4090 
4091 	if (BEx_chip(adapter))
4092 		return 0;
4093 
4094 	spin_lock_bh(&adapter->mcc_lock);
4095 
4096 	wrb = wrb_from_mccq(adapter);
4097 	if (!wrb) {
4098 		status = -EBUSY;
4099 		goto err;
4100 	}
4101 
4102 	req = embedded_payload(wrb);
4103 
4104 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4105 			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4106 			       wrb, NULL);
4107 
4108 	req->hdr.domain = domain;
4109 	req->enable = 1;
4110 	status = be_mcc_notify_wait(adapter);
4111 err:
4112 	spin_unlock_bh(&adapter->mcc_lock);
4113 	return status;
4114 }
4115 
4116 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4117 {
4118 	struct be_mcc_wrb *wrb;
4119 	struct be_cmd_req_intr_set *req;
4120 	int status;
4121 
4122 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4123 		return -1;
4124 
4125 	wrb = wrb_from_mbox(adapter);
4126 
4127 	req = embedded_payload(wrb);
4128 
4129 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4130 			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4131 			       wrb, NULL);
4132 
4133 	req->intr_enabled = intr_enable;
4134 
4135 	status = be_mbox_notify_wait(adapter);
4136 
4137 	mutex_unlock(&adapter->mbox_lock);
4138 	return status;
4139 }
4140 
4141 /* Uses MBOX */
4142 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4143 {
4144 	struct be_cmd_req_get_active_profile *req;
4145 	struct be_mcc_wrb *wrb;
4146 	int status;
4147 
4148 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4149 		return -1;
4150 
4151 	wrb = wrb_from_mbox(adapter);
4152 	if (!wrb) {
4153 		status = -EBUSY;
4154 		goto err;
4155 	}
4156 
4157 	req = embedded_payload(wrb);
4158 
4159 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4160 			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4161 			       wrb, NULL);
4162 
4163 	status = be_mbox_notify_wait(adapter);
4164 	if (!status) {
4165 		struct be_cmd_resp_get_active_profile *resp =
4166 							embedded_payload(wrb);
4167 
4168 		*profile_id = le16_to_cpu(resp->active_profile_id);
4169 	}
4170 
4171 err:
4172 	mutex_unlock(&adapter->mbox_lock);
4173 	return status;
4174 }
4175 
4176 int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4177 				   int link_state, u8 domain)
4178 {
4179 	struct be_mcc_wrb *wrb;
4180 	struct be_cmd_req_set_ll_link *req;
4181 	int status;
4182 
4183 	if (BEx_chip(adapter) || lancer_chip(adapter))
4184 		return -EOPNOTSUPP;
4185 
4186 	spin_lock_bh(&adapter->mcc_lock);
4187 
4188 	wrb = wrb_from_mccq(adapter);
4189 	if (!wrb) {
4190 		status = -EBUSY;
4191 		goto err;
4192 	}
4193 
4194 	req = embedded_payload(wrb);
4195 
4196 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4197 			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4198 			       sizeof(*req), wrb, NULL);
4199 
4200 	req->hdr.version = 1;
4201 	req->hdr.domain = domain;
4202 
4203 	if (link_state == IFLA_VF_LINK_STATE_ENABLE)
4204 		req->link_config |= 1;
4205 
4206 	if (link_state == IFLA_VF_LINK_STATE_AUTO)
4207 		req->link_config |= 1 << PLINK_TRACK_SHIFT;
4208 
4209 	status = be_mcc_notify_wait(adapter);
4210 err:
4211 	spin_unlock_bh(&adapter->mcc_lock);
4212 	return status;
4213 }
4214 
4215 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
4216 		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
4217 {
4218 	struct be_adapter *adapter = netdev_priv(netdev_handle);
4219 	struct be_mcc_wrb *wrb;
4220 	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
4221 	struct be_cmd_req_hdr *req;
4222 	struct be_cmd_resp_hdr *resp;
4223 	int status;
4224 
4225 	spin_lock_bh(&adapter->mcc_lock);
4226 
4227 	wrb = wrb_from_mccq(adapter);
4228 	if (!wrb) {
4229 		status = -EBUSY;
4230 		goto err;
4231 	}
4232 	req = embedded_payload(wrb);
4233 	resp = embedded_payload(wrb);
4234 
4235 	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4236 			       hdr->opcode, wrb_payload_size, wrb, NULL);
4237 	memcpy(req, wrb_payload, wrb_payload_size);
4238 	be_dws_cpu_to_le(req, wrb_payload_size);
4239 
4240 	status = be_mcc_notify_wait(adapter);
4241 	if (cmd_status)
4242 		*cmd_status = (status & 0xffff);
4243 	if (ext_status)
4244 		*ext_status = 0;
4245 	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4246 	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4247 err:
4248 	spin_unlock_bh(&adapter->mcc_lock);
4249 	return status;
4250 }
4251 EXPORT_SYMBOL(be_roce_mcc_cmd);
4252