1 /*
2  * Copyright (C) 2005 - 2015 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21 
22 char *be_misconfig_evt_port_state[] = {
23 	"Physical Link is functional",
24 	"Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
25 	"Optics of two types installed – Remove one optic or install matching pair of optics.",
26 	"Incompatible optics – Replace with compatible optics for card to function.",
27 	"Unqualified optics – Replace with Avago optics for Warranty and Technical Support.",
28 	"Uncertified optics – Replace with Avago-certified optics to enable link operation."
29 };
30 
31 static char *be_port_misconfig_evt_severity[] = {
32 	"KERN_WARN",
33 	"KERN_INFO",
34 	"KERN_ERR",
35 	"KERN_WARN"
36 };
37 
38 static char *phy_state_oper_desc[] = {
39 	"Link is non-operational",
40 	"Link is operational",
41 	""
42 };
43 
44 static struct be_cmd_priv_map cmd_priv_map[] = {
45 	{
46 		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
47 		CMD_SUBSYSTEM_ETH,
48 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
49 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
50 	},
51 	{
52 		OPCODE_COMMON_GET_FLOW_CONTROL,
53 		CMD_SUBSYSTEM_COMMON,
54 		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
55 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
56 	},
57 	{
58 		OPCODE_COMMON_SET_FLOW_CONTROL,
59 		CMD_SUBSYSTEM_COMMON,
60 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
61 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
62 	},
63 	{
64 		OPCODE_ETH_GET_PPORT_STATS,
65 		CMD_SUBSYSTEM_ETH,
66 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
67 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
68 	},
69 	{
70 		OPCODE_COMMON_GET_PHY_DETAILS,
71 		CMD_SUBSYSTEM_COMMON,
72 		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
73 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
74 	},
75 	{
76 		OPCODE_LOWLEVEL_HOST_DDR_DMA,
77 		CMD_SUBSYSTEM_LOWLEVEL,
78 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
79 	},
80 	{
81 		OPCODE_LOWLEVEL_LOOPBACK_TEST,
82 		CMD_SUBSYSTEM_LOWLEVEL,
83 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
84 	},
85 	{
86 		OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
87 		CMD_SUBSYSTEM_LOWLEVEL,
88 		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
89 	},
90 };
91 
92 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
93 {
94 	int i;
95 	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
96 	u32 cmd_privileges = adapter->cmd_privileges;
97 
98 	for (i = 0; i < num_entries; i++)
99 		if (opcode == cmd_priv_map[i].opcode &&
100 		    subsystem == cmd_priv_map[i].subsystem)
101 			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
102 				return false;
103 
104 	return true;
105 }
106 
107 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
108 {
109 	return wrb->payload.embedded_payload;
110 }
111 
112 static int be_mcc_notify(struct be_adapter *adapter)
113 {
114 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
115 	u32 val = 0;
116 
117 	if (be_check_error(adapter, BE_ERROR_ANY))
118 		return -EIO;
119 
120 	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
121 	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
122 
123 	wmb();
124 	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
125 
126 	return 0;
127 }
128 
129 /* To check if valid bit is set, check the entire word as we don't know
130  * the endianness of the data (old entry is host endian while a new entry is
131  * little endian) */
132 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
133 {
134 	u32 flags;
135 
136 	if (compl->flags != 0) {
137 		flags = le32_to_cpu(compl->flags);
138 		if (flags & CQE_FLAGS_VALID_MASK) {
139 			compl->flags = flags;
140 			return true;
141 		}
142 	}
143 	return false;
144 }
145 
146 /* Need to reset the entire word that houses the valid bit */
147 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
148 {
149 	compl->flags = 0;
150 }
151 
152 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
153 {
154 	unsigned long addr;
155 
156 	addr = tag1;
157 	addr = ((addr << 16) << 16) | tag0;
158 	return (void *)addr;
159 }
160 
161 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
162 {
163 	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
164 	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
165 	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
166 	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
167 	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
168 	    (base_status == MCC_STATUS_ILLEGAL_FIELD ||
169 	     addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
170 		return true;
171 	else
172 		return false;
173 }
174 
175 /* Place holder for all the async MCC cmds wherein the caller is not in a busy
176  * loop (has not issued be_mcc_notify_wait())
177  */
178 static void be_async_cmd_process(struct be_adapter *adapter,
179 				 struct be_mcc_compl *compl,
180 				 struct be_cmd_resp_hdr *resp_hdr)
181 {
182 	enum mcc_base_status base_status = base_status(compl->status);
183 	u8 opcode = 0, subsystem = 0;
184 
185 	if (resp_hdr) {
186 		opcode = resp_hdr->opcode;
187 		subsystem = resp_hdr->subsystem;
188 	}
189 
190 	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
191 	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
192 		complete(&adapter->et_cmd_compl);
193 		return;
194 	}
195 
196 	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
197 	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
198 		complete(&adapter->et_cmd_compl);
199 		return;
200 	}
201 
202 	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
203 	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
204 	    subsystem == CMD_SUBSYSTEM_COMMON) {
205 		adapter->flash_status = compl->status;
206 		complete(&adapter->et_cmd_compl);
207 		return;
208 	}
209 
210 	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
211 	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
212 	    subsystem == CMD_SUBSYSTEM_ETH &&
213 	    base_status == MCC_STATUS_SUCCESS) {
214 		be_parse_stats(adapter);
215 		adapter->stats_cmd_sent = false;
216 		return;
217 	}
218 
219 	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
220 	    subsystem == CMD_SUBSYSTEM_COMMON) {
221 		if (base_status == MCC_STATUS_SUCCESS) {
222 			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
223 							(void *)resp_hdr;
224 			adapter->hwmon_info.be_on_die_temp =
225 						resp->on_die_temperature;
226 		} else {
227 			adapter->be_get_temp_freq = 0;
228 			adapter->hwmon_info.be_on_die_temp =
229 						BE_INVALID_DIE_TEMP;
230 		}
231 		return;
232 	}
233 }
234 
235 static int be_mcc_compl_process(struct be_adapter *adapter,
236 				struct be_mcc_compl *compl)
237 {
238 	enum mcc_base_status base_status;
239 	enum mcc_addl_status addl_status;
240 	struct be_cmd_resp_hdr *resp_hdr;
241 	u8 opcode = 0, subsystem = 0;
242 
243 	/* Just swap the status to host endian; mcc tag is opaquely copied
244 	 * from mcc_wrb */
245 	be_dws_le_to_cpu(compl, 4);
246 
247 	base_status = base_status(compl->status);
248 	addl_status = addl_status(compl->status);
249 
250 	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
251 	if (resp_hdr) {
252 		opcode = resp_hdr->opcode;
253 		subsystem = resp_hdr->subsystem;
254 	}
255 
256 	be_async_cmd_process(adapter, compl, resp_hdr);
257 
258 	if (base_status != MCC_STATUS_SUCCESS &&
259 	    !be_skip_err_log(opcode, base_status, addl_status)) {
260 		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
261 		    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
262 			dev_warn(&adapter->pdev->dev,
263 				 "VF is not privileged to issue opcode %d-%d\n",
264 				 opcode, subsystem);
265 		} else {
266 			dev_err(&adapter->pdev->dev,
267 				"opcode %d-%d failed:status %d-%d\n",
268 				opcode, subsystem, base_status, addl_status);
269 		}
270 	}
271 	return compl->status;
272 }
273 
274 /* Link state evt is a string of bytes; no need for endian swapping */
275 static void be_async_link_state_process(struct be_adapter *adapter,
276 					struct be_mcc_compl *compl)
277 {
278 	struct be_async_event_link_state *evt =
279 			(struct be_async_event_link_state *)compl;
280 
281 	/* When link status changes, link speed must be re-queried from FW */
282 	adapter->phy.link_speed = -1;
283 
284 	/* On BEx the FW does not send a separate link status
285 	 * notification for physical and logical link.
286 	 * On other chips just process the logical link
287 	 * status notification
288 	 */
289 	if (!BEx_chip(adapter) &&
290 	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
291 		return;
292 
293 	/* For the initial link status do not rely on the ASYNC event as
294 	 * it may not be received in some cases.
295 	 */
296 	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
297 		be_link_status_update(adapter,
298 				      evt->port_link_status & LINK_STATUS_MASK);
299 }
300 
301 static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
302 						  struct be_mcc_compl *compl)
303 {
304 	struct be_async_event_misconfig_port *evt =
305 			(struct be_async_event_misconfig_port *)compl;
306 	u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1);
307 	u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2);
308 	u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE;
309 	struct device *dev = &adapter->pdev->dev;
310 	u8 msg_severity = DEFAULT_MSG_SEVERITY;
311 	u8 phy_state_info;
312 	u8 new_phy_state;
313 
314 	new_phy_state =
315 		(sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff;
316 
317 	if (new_phy_state == adapter->phy_state)
318 		return;
319 
320 	adapter->phy_state = new_phy_state;
321 
322 	/* for older fw that doesn't populate link effect data */
323 	if (!sfp_misconfig_evt_word2)
324 		goto log_message;
325 
326 	phy_state_info =
327 		(sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff;
328 
329 	if (phy_state_info & PHY_STATE_INFO_VALID) {
330 		msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1;
331 
332 		if (be_phy_unqualified(new_phy_state))
333 			phy_oper_state = (phy_state_info & PHY_STATE_OPER);
334 	}
335 
336 log_message:
337 	/* Log an error message that would allow a user to determine
338 	 * whether the SFPs have an issue
339 	 */
340 	if (be_phy_state_unknown(new_phy_state))
341 		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
342 			   "Port %c: Unrecognized Optics state: 0x%x. %s",
343 			   adapter->port_name,
344 			   new_phy_state,
345 			   phy_state_oper_desc[phy_oper_state]);
346 	else
347 		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
348 			   "Port %c: %s %s",
349 			   adapter->port_name,
350 			   be_misconfig_evt_port_state[new_phy_state],
351 			   phy_state_oper_desc[phy_oper_state]);
352 
353 	/* Log Vendor name and part no. if a misconfigured SFP is detected */
354 	if (be_phy_misconfigured(new_phy_state))
355 		adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED;
356 }
357 
358 /* Grp5 CoS Priority evt */
359 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
360 					       struct be_mcc_compl *compl)
361 {
362 	struct be_async_event_grp5_cos_priority *evt =
363 			(struct be_async_event_grp5_cos_priority *)compl;
364 
365 	if (evt->valid) {
366 		adapter->vlan_prio_bmap = evt->available_priority_bmap;
367 		adapter->recommended_prio_bits =
368 			evt->reco_default_priority << VLAN_PRIO_SHIFT;
369 	}
370 }
371 
372 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
373 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
374 					    struct be_mcc_compl *compl)
375 {
376 	struct be_async_event_grp5_qos_link_speed *evt =
377 			(struct be_async_event_grp5_qos_link_speed *)compl;
378 
379 	if (adapter->phy.link_speed >= 0 &&
380 	    evt->physical_port == adapter->port_num)
381 		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
382 }
383 
384 /*Grp5 PVID evt*/
385 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
386 					     struct be_mcc_compl *compl)
387 {
388 	struct be_async_event_grp5_pvid_state *evt =
389 			(struct be_async_event_grp5_pvid_state *)compl;
390 
391 	if (evt->enabled) {
392 		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
393 		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
394 	} else {
395 		adapter->pvid = 0;
396 	}
397 }
398 
399 #define MGMT_ENABLE_MASK	0x4
400 static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
401 					     struct be_mcc_compl *compl)
402 {
403 	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
404 	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);
405 
406 	if (evt_dw1 & MGMT_ENABLE_MASK) {
407 		adapter->flags |= BE_FLAGS_OS2BMC;
408 		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
409 	} else {
410 		adapter->flags &= ~BE_FLAGS_OS2BMC;
411 	}
412 }
413 
414 static void be_async_grp5_evt_process(struct be_adapter *adapter,
415 				      struct be_mcc_compl *compl)
416 {
417 	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
418 				ASYNC_EVENT_TYPE_MASK;
419 
420 	switch (event_type) {
421 	case ASYNC_EVENT_COS_PRIORITY:
422 		be_async_grp5_cos_priority_process(adapter, compl);
423 		break;
424 	case ASYNC_EVENT_QOS_SPEED:
425 		be_async_grp5_qos_speed_process(adapter, compl);
426 		break;
427 	case ASYNC_EVENT_PVID_STATE:
428 		be_async_grp5_pvid_state_process(adapter, compl);
429 		break;
430 	/* Async event to disable/enable os2bmc and/or mac-learning */
431 	case ASYNC_EVENT_FW_CONTROL:
432 		be_async_grp5_fw_control_process(adapter, compl);
433 		break;
434 	default:
435 		break;
436 	}
437 }
438 
439 static void be_async_dbg_evt_process(struct be_adapter *adapter,
440 				     struct be_mcc_compl *cmp)
441 {
442 	u8 event_type = 0;
443 	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;
444 
445 	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
446 			ASYNC_EVENT_TYPE_MASK;
447 
448 	switch (event_type) {
449 	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
450 		if (evt->valid)
451 			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
452 		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
453 	break;
454 	default:
455 		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
456 			 event_type);
457 	break;
458 	}
459 }
460 
461 static void be_async_sliport_evt_process(struct be_adapter *adapter,
462 					 struct be_mcc_compl *cmp)
463 {
464 	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
465 			ASYNC_EVENT_TYPE_MASK;
466 
467 	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
468 		be_async_port_misconfig_event_process(adapter, cmp);
469 }
470 
471 static inline bool is_link_state_evt(u32 flags)
472 {
473 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
474 			ASYNC_EVENT_CODE_LINK_STATE;
475 }
476 
477 static inline bool is_grp5_evt(u32 flags)
478 {
479 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
480 			ASYNC_EVENT_CODE_GRP_5;
481 }
482 
483 static inline bool is_dbg_evt(u32 flags)
484 {
485 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
486 			ASYNC_EVENT_CODE_QNQ;
487 }
488 
489 static inline bool is_sliport_evt(u32 flags)
490 {
491 	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
492 		ASYNC_EVENT_CODE_SLIPORT;
493 }
494 
495 static void be_mcc_event_process(struct be_adapter *adapter,
496 				 struct be_mcc_compl *compl)
497 {
498 	if (is_link_state_evt(compl->flags))
499 		be_async_link_state_process(adapter, compl);
500 	else if (is_grp5_evt(compl->flags))
501 		be_async_grp5_evt_process(adapter, compl);
502 	else if (is_dbg_evt(compl->flags))
503 		be_async_dbg_evt_process(adapter, compl);
504 	else if (is_sliport_evt(compl->flags))
505 		be_async_sliport_evt_process(adapter, compl);
506 }
507 
508 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
509 {
510 	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
511 	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
512 
513 	if (be_mcc_compl_is_new(compl)) {
514 		queue_tail_inc(mcc_cq);
515 		return compl;
516 	}
517 	return NULL;
518 }
519 
520 void be_async_mcc_enable(struct be_adapter *adapter)
521 {
522 	spin_lock_bh(&adapter->mcc_cq_lock);
523 
524 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
525 	adapter->mcc_obj.rearm_cq = true;
526 
527 	spin_unlock_bh(&adapter->mcc_cq_lock);
528 }
529 
530 void be_async_mcc_disable(struct be_adapter *adapter)
531 {
532 	spin_lock_bh(&adapter->mcc_cq_lock);
533 
534 	adapter->mcc_obj.rearm_cq = false;
535 	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
536 
537 	spin_unlock_bh(&adapter->mcc_cq_lock);
538 }
539 
540 int be_process_mcc(struct be_adapter *adapter)
541 {
542 	struct be_mcc_compl *compl;
543 	int num = 0, status = 0;
544 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
545 
546 	spin_lock(&adapter->mcc_cq_lock);
547 
548 	while ((compl = be_mcc_compl_get(adapter))) {
549 		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
550 			be_mcc_event_process(adapter, compl);
551 		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
552 			status = be_mcc_compl_process(adapter, compl);
553 			atomic_dec(&mcc_obj->q.used);
554 		}
555 		be_mcc_compl_use(compl);
556 		num++;
557 	}
558 
559 	if (num)
560 		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
561 
562 	spin_unlock(&adapter->mcc_cq_lock);
563 	return status;
564 }
565 
566 /* Wait till no more pending mcc requests are present */
567 static int be_mcc_wait_compl(struct be_adapter *adapter)
568 {
569 #define mcc_timeout		120000 /* 12s timeout */
570 	int i, status = 0;
571 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
572 
573 	for (i = 0; i < mcc_timeout; i++) {
574 		if (be_check_error(adapter, BE_ERROR_ANY))
575 			return -EIO;
576 
577 		local_bh_disable();
578 		status = be_process_mcc(adapter);
579 		local_bh_enable();
580 
581 		if (atomic_read(&mcc_obj->q.used) == 0)
582 			break;
583 		udelay(100);
584 	}
585 	if (i == mcc_timeout) {
586 		dev_err(&adapter->pdev->dev, "FW not responding\n");
587 		be_set_error(adapter, BE_ERROR_FW);
588 		return -EIO;
589 	}
590 	return status;
591 }
592 
593 /* Notify MCC requests and wait for completion */
594 static int be_mcc_notify_wait(struct be_adapter *adapter)
595 {
596 	int status;
597 	struct be_mcc_wrb *wrb;
598 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
599 	u32 index = mcc_obj->q.head;
600 	struct be_cmd_resp_hdr *resp;
601 
602 	index_dec(&index, mcc_obj->q.len);
603 	wrb = queue_index_node(&mcc_obj->q, index);
604 
605 	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
606 
607 	status = be_mcc_notify(adapter);
608 	if (status)
609 		goto out;
610 
611 	status = be_mcc_wait_compl(adapter);
612 	if (status == -EIO)
613 		goto out;
614 
615 	status = (resp->base_status |
616 		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
617 		   CQE_ADDL_STATUS_SHIFT));
618 out:
619 	return status;
620 }
621 
622 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
623 {
624 	int msecs = 0;
625 	u32 ready;
626 
627 	do {
628 		if (be_check_error(adapter, BE_ERROR_ANY))
629 			return -EIO;
630 
631 		ready = ioread32(db);
632 		if (ready == 0xffffffff)
633 			return -1;
634 
635 		ready &= MPU_MAILBOX_DB_RDY_MASK;
636 		if (ready)
637 			break;
638 
639 		if (msecs > 4000) {
640 			dev_err(&adapter->pdev->dev, "FW not responding\n");
641 			be_set_error(adapter, BE_ERROR_FW);
642 			be_detect_error(adapter);
643 			return -1;
644 		}
645 
646 		msleep(1);
647 		msecs++;
648 	} while (true);
649 
650 	return 0;
651 }
652 
653 /*
654  * Insert the mailbox address into the doorbell in two steps
655  * Polls on the mbox doorbell till a command completion (or a timeout) occurs
656  */
657 static int be_mbox_notify_wait(struct be_adapter *adapter)
658 {
659 	int status;
660 	u32 val = 0;
661 	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
662 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
663 	struct be_mcc_mailbox *mbox = mbox_mem->va;
664 	struct be_mcc_compl *compl = &mbox->compl;
665 
666 	/* wait for ready to be set */
667 	status = be_mbox_db_ready_wait(adapter, db);
668 	if (status != 0)
669 		return status;
670 
671 	val |= MPU_MAILBOX_DB_HI_MASK;
672 	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
673 	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
674 	iowrite32(val, db);
675 
676 	/* wait for ready to be set */
677 	status = be_mbox_db_ready_wait(adapter, db);
678 	if (status != 0)
679 		return status;
680 
681 	val = 0;
682 	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
683 	val |= (u32)(mbox_mem->dma >> 4) << 2;
684 	iowrite32(val, db);
685 
686 	status = be_mbox_db_ready_wait(adapter, db);
687 	if (status != 0)
688 		return status;
689 
690 	/* A cq entry has been made now */
691 	if (be_mcc_compl_is_new(compl)) {
692 		status = be_mcc_compl_process(adapter, &mbox->compl);
693 		be_mcc_compl_use(compl);
694 		if (status)
695 			return status;
696 	} else {
697 		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
698 		return -1;
699 	}
700 	return 0;
701 }
702 
703 static u16 be_POST_stage_get(struct be_adapter *adapter)
704 {
705 	u32 sem;
706 
707 	if (BEx_chip(adapter))
708 		sem  = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
709 	else
710 		pci_read_config_dword(adapter->pdev,
711 				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
712 
713 	return sem & POST_STAGE_MASK;
714 }
715 
716 static int lancer_wait_ready(struct be_adapter *adapter)
717 {
718 #define SLIPORT_READY_TIMEOUT 30
719 	u32 sliport_status;
720 	int i;
721 
722 	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
723 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
724 		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
725 			return 0;
726 
727 		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
728 		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
729 			return -EIO;
730 
731 		msleep(1000);
732 	}
733 
734 	return sliport_status ? : -1;
735 }
736 
737 int be_fw_wait_ready(struct be_adapter *adapter)
738 {
739 	u16 stage;
740 	int status, timeout = 0;
741 	struct device *dev = &adapter->pdev->dev;
742 
743 	if (lancer_chip(adapter)) {
744 		status = lancer_wait_ready(adapter);
745 		if (status) {
746 			stage = status;
747 			goto err;
748 		}
749 		return 0;
750 	}
751 
752 	do {
753 		/* There's no means to poll POST state on BE2/3 VFs */
754 		if (BEx_chip(adapter) && be_virtfn(adapter))
755 			return 0;
756 
757 		stage = be_POST_stage_get(adapter);
758 		if (stage == POST_STAGE_ARMFW_RDY)
759 			return 0;
760 
761 		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
762 		if (msleep_interruptible(2000)) {
763 			dev_err(dev, "Waiting for POST aborted\n");
764 			return -EINTR;
765 		}
766 		timeout += 2;
767 	} while (timeout < 60);
768 
769 err:
770 	dev_err(dev, "POST timeout; stage=%#x\n", stage);
771 	return -ETIMEDOUT;
772 }
773 
774 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
775 {
776 	return &wrb->payload.sgl[0];
777 }
778 
779 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
780 {
781 	wrb->tag0 = addr & 0xFFFFFFFF;
782 	wrb->tag1 = upper_32_bits(addr);
783 }
784 
785 /* Don't touch the hdr after it's prepared */
786 /* mem will be NULL for embedded commands */
787 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
788 				   u8 subsystem, u8 opcode, int cmd_len,
789 				   struct be_mcc_wrb *wrb,
790 				   struct be_dma_mem *mem)
791 {
792 	struct be_sge *sge;
793 
794 	req_hdr->opcode = opcode;
795 	req_hdr->subsystem = subsystem;
796 	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
797 	req_hdr->version = 0;
798 	fill_wrb_tags(wrb, (ulong) req_hdr);
799 	wrb->payload_length = cmd_len;
800 	if (mem) {
801 		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
802 			MCC_WRB_SGE_CNT_SHIFT;
803 		sge = nonembedded_sgl(wrb);
804 		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
805 		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
806 		sge->len = cpu_to_le32(mem->size);
807 	} else
808 		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
809 	be_dws_cpu_to_le(wrb, 8);
810 }
811 
812 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
813 				      struct be_dma_mem *mem)
814 {
815 	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
816 	u64 dma = (u64)mem->dma;
817 
818 	for (i = 0; i < buf_pages; i++) {
819 		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
820 		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
821 		dma += PAGE_SIZE_4K;
822 	}
823 }
824 
825 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
826 {
827 	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
828 	struct be_mcc_wrb *wrb
829 		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
830 	memset(wrb, 0, sizeof(*wrb));
831 	return wrb;
832 }
833 
834 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
835 {
836 	struct be_queue_info *mccq = &adapter->mcc_obj.q;
837 	struct be_mcc_wrb *wrb;
838 
839 	if (!mccq->created)
840 		return NULL;
841 
842 	if (atomic_read(&mccq->used) >= mccq->len)
843 		return NULL;
844 
845 	wrb = queue_head_node(mccq);
846 	queue_head_inc(mccq);
847 	atomic_inc(&mccq->used);
848 	memset(wrb, 0, sizeof(*wrb));
849 	return wrb;
850 }
851 
852 static bool use_mcc(struct be_adapter *adapter)
853 {
854 	return adapter->mcc_obj.q.created;
855 }
856 
857 /* Must be used only in process context */
858 static int be_cmd_lock(struct be_adapter *adapter)
859 {
860 	if (use_mcc(adapter)) {
861 		spin_lock_bh(&adapter->mcc_lock);
862 		return 0;
863 	} else {
864 		return mutex_lock_interruptible(&adapter->mbox_lock);
865 	}
866 }
867 
868 /* Must be used only in process context */
869 static void be_cmd_unlock(struct be_adapter *adapter)
870 {
871 	if (use_mcc(adapter))
872 		spin_unlock_bh(&adapter->mcc_lock);
873 	else
874 		return mutex_unlock(&adapter->mbox_lock);
875 }
876 
877 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
878 				      struct be_mcc_wrb *wrb)
879 {
880 	struct be_mcc_wrb *dest_wrb;
881 
882 	if (use_mcc(adapter)) {
883 		dest_wrb = wrb_from_mccq(adapter);
884 		if (!dest_wrb)
885 			return NULL;
886 	} else {
887 		dest_wrb = wrb_from_mbox(adapter);
888 	}
889 
890 	memcpy(dest_wrb, wrb, sizeof(*wrb));
891 	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
892 		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
893 
894 	return dest_wrb;
895 }
896 
897 /* Must be used only in process context */
898 static int be_cmd_notify_wait(struct be_adapter *adapter,
899 			      struct be_mcc_wrb *wrb)
900 {
901 	struct be_mcc_wrb *dest_wrb;
902 	int status;
903 
904 	status = be_cmd_lock(adapter);
905 	if (status)
906 		return status;
907 
908 	dest_wrb = be_cmd_copy(adapter, wrb);
909 	if (!dest_wrb) {
910 		status = -EBUSY;
911 		goto unlock;
912 	}
913 
914 	if (use_mcc(adapter))
915 		status = be_mcc_notify_wait(adapter);
916 	else
917 		status = be_mbox_notify_wait(adapter);
918 
919 	if (!status)
920 		memcpy(wrb, dest_wrb, sizeof(*wrb));
921 
922 unlock:
923 	be_cmd_unlock(adapter);
924 	return status;
925 }
926 
927 /* Tell fw we're about to start firing cmds by writing a
928  * special pattern across the wrb hdr; uses mbox
929  */
930 int be_cmd_fw_init(struct be_adapter *adapter)
931 {
932 	u8 *wrb;
933 	int status;
934 
935 	if (lancer_chip(adapter))
936 		return 0;
937 
938 	if (mutex_lock_interruptible(&adapter->mbox_lock))
939 		return -1;
940 
941 	wrb = (u8 *)wrb_from_mbox(adapter);
942 	*wrb++ = 0xFF;
943 	*wrb++ = 0x12;
944 	*wrb++ = 0x34;
945 	*wrb++ = 0xFF;
946 	*wrb++ = 0xFF;
947 	*wrb++ = 0x56;
948 	*wrb++ = 0x78;
949 	*wrb = 0xFF;
950 
951 	status = be_mbox_notify_wait(adapter);
952 
953 	mutex_unlock(&adapter->mbox_lock);
954 	return status;
955 }
956 
957 /* Tell fw we're done with firing cmds by writing a
958  * special pattern across the wrb hdr; uses mbox
959  */
960 int be_cmd_fw_clean(struct be_adapter *adapter)
961 {
962 	u8 *wrb;
963 	int status;
964 
965 	if (lancer_chip(adapter))
966 		return 0;
967 
968 	if (mutex_lock_interruptible(&adapter->mbox_lock))
969 		return -1;
970 
971 	wrb = (u8 *)wrb_from_mbox(adapter);
972 	*wrb++ = 0xFF;
973 	*wrb++ = 0xAA;
974 	*wrb++ = 0xBB;
975 	*wrb++ = 0xFF;
976 	*wrb++ = 0xFF;
977 	*wrb++ = 0xCC;
978 	*wrb++ = 0xDD;
979 	*wrb = 0xFF;
980 
981 	status = be_mbox_notify_wait(adapter);
982 
983 	mutex_unlock(&adapter->mbox_lock);
984 	return status;
985 }
986 
987 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
988 {
989 	struct be_mcc_wrb *wrb;
990 	struct be_cmd_req_eq_create *req;
991 	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
992 	int status, ver = 0;
993 
994 	if (mutex_lock_interruptible(&adapter->mbox_lock))
995 		return -1;
996 
997 	wrb = wrb_from_mbox(adapter);
998 	req = embedded_payload(wrb);
999 
1000 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1001 			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
1002 			       NULL);
1003 
1004 	/* Support for EQ_CREATEv2 available only SH-R onwards */
1005 	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
1006 		ver = 2;
1007 
1008 	req->hdr.version = ver;
1009 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1010 
1011 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
1012 	/* 4byte eqe*/
1013 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
1014 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
1015 		      __ilog2_u32(eqo->q.len / 256));
1016 	be_dws_cpu_to_le(req->context, sizeof(req->context));
1017 
1018 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1019 
1020 	status = be_mbox_notify_wait(adapter);
1021 	if (!status) {
1022 		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
1023 
1024 		eqo->q.id = le16_to_cpu(resp->eq_id);
1025 		eqo->msix_idx =
1026 			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
1027 		eqo->q.created = true;
1028 	}
1029 
1030 	mutex_unlock(&adapter->mbox_lock);
1031 	return status;
1032 }
1033 
1034 /* Use MCC */
1035 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
1036 			  bool permanent, u32 if_handle, u32 pmac_id)
1037 {
1038 	struct be_mcc_wrb *wrb;
1039 	struct be_cmd_req_mac_query *req;
1040 	int status;
1041 
1042 	spin_lock_bh(&adapter->mcc_lock);
1043 
1044 	wrb = wrb_from_mccq(adapter);
1045 	if (!wrb) {
1046 		status = -EBUSY;
1047 		goto err;
1048 	}
1049 	req = embedded_payload(wrb);
1050 
1051 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1052 			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
1053 			       NULL);
1054 	req->type = MAC_ADDRESS_TYPE_NETWORK;
1055 	if (permanent) {
1056 		req->permanent = 1;
1057 	} else {
1058 		req->if_id = cpu_to_le16((u16)if_handle);
1059 		req->pmac_id = cpu_to_le32(pmac_id);
1060 		req->permanent = 0;
1061 	}
1062 
1063 	status = be_mcc_notify_wait(adapter);
1064 	if (!status) {
1065 		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
1066 
1067 		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
1068 	}
1069 
1070 err:
1071 	spin_unlock_bh(&adapter->mcc_lock);
1072 	return status;
1073 }
1074 
1075 /* Uses synchronous MCCQ */
1076 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
1077 		    u32 if_id, u32 *pmac_id, u32 domain)
1078 {
1079 	struct be_mcc_wrb *wrb;
1080 	struct be_cmd_req_pmac_add *req;
1081 	int status;
1082 
1083 	spin_lock_bh(&adapter->mcc_lock);
1084 
1085 	wrb = wrb_from_mccq(adapter);
1086 	if (!wrb) {
1087 		status = -EBUSY;
1088 		goto err;
1089 	}
1090 	req = embedded_payload(wrb);
1091 
1092 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1093 			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
1094 			       NULL);
1095 
1096 	req->hdr.domain = domain;
1097 	req->if_id = cpu_to_le32(if_id);
1098 	memcpy(req->mac_address, mac_addr, ETH_ALEN);
1099 
1100 	status = be_mcc_notify_wait(adapter);
1101 	if (!status) {
1102 		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
1103 
1104 		*pmac_id = le32_to_cpu(resp->pmac_id);
1105 	}
1106 
1107 err:
1108 	spin_unlock_bh(&adapter->mcc_lock);
1109 
1110 	 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
1111 		status = -EPERM;
1112 
1113 	return status;
1114 }
1115 
1116 /* Uses synchronous MCCQ */
1117 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
1118 {
1119 	struct be_mcc_wrb *wrb;
1120 	struct be_cmd_req_pmac_del *req;
1121 	int status;
1122 
1123 	if (pmac_id == -1)
1124 		return 0;
1125 
1126 	spin_lock_bh(&adapter->mcc_lock);
1127 
1128 	wrb = wrb_from_mccq(adapter);
1129 	if (!wrb) {
1130 		status = -EBUSY;
1131 		goto err;
1132 	}
1133 	req = embedded_payload(wrb);
1134 
1135 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1136 			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
1137 			       wrb, NULL);
1138 
1139 	req->hdr.domain = dom;
1140 	req->if_id = cpu_to_le32(if_id);
1141 	req->pmac_id = cpu_to_le32(pmac_id);
1142 
1143 	status = be_mcc_notify_wait(adapter);
1144 
1145 err:
1146 	spin_unlock_bh(&adapter->mcc_lock);
1147 	return status;
1148 }
1149 
1150 /* Uses Mbox */
1151 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1152 		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1153 {
1154 	struct be_mcc_wrb *wrb;
1155 	struct be_cmd_req_cq_create *req;
1156 	struct be_dma_mem *q_mem = &cq->dma_mem;
1157 	void *ctxt;
1158 	int status;
1159 
1160 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1161 		return -1;
1162 
1163 	wrb = wrb_from_mbox(adapter);
1164 	req = embedded_payload(wrb);
1165 	ctxt = &req->context;
1166 
1167 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1168 			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
1169 			       NULL);
1170 
1171 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1172 
1173 	if (BEx_chip(adapter)) {
1174 		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1175 			      coalesce_wm);
1176 		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1177 			      ctxt, no_delay);
1178 		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1179 			      __ilog2_u32(cq->len / 256));
1180 		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1181 		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1182 		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1183 	} else {
1184 		req->hdr.version = 2;
1185 		req->page_size = 1; /* 1 for 4K */
1186 
1187 		/* coalesce-wm field in this cmd is not relevant to Lancer.
1188 		 * Lancer uses COMMON_MODIFY_CQ to set this field
1189 		 */
1190 		if (!lancer_chip(adapter))
1191 			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
1192 				      ctxt, coalesce_wm);
1193 		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1194 			      no_delay);
1195 		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1196 			      __ilog2_u32(cq->len / 256));
1197 		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1198 		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
1199 		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
1200 	}
1201 
1202 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1203 
1204 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1205 
1206 	status = be_mbox_notify_wait(adapter);
1207 	if (!status) {
1208 		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1209 
1210 		cq->id = le16_to_cpu(resp->cq_id);
1211 		cq->created = true;
1212 	}
1213 
1214 	mutex_unlock(&adapter->mbox_lock);
1215 
1216 	return status;
1217 }
1218 
1219 static u32 be_encoded_q_len(int q_len)
1220 {
1221 	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1222 
1223 	if (len_encoded == 16)
1224 		len_encoded = 0;
1225 	return len_encoded;
1226 }
1227 
1228 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1229 				  struct be_queue_info *mccq,
1230 				  struct be_queue_info *cq)
1231 {
1232 	struct be_mcc_wrb *wrb;
1233 	struct be_cmd_req_mcc_ext_create *req;
1234 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1235 	void *ctxt;
1236 	int status;
1237 
1238 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1239 		return -1;
1240 
1241 	wrb = wrb_from_mbox(adapter);
1242 	req = embedded_payload(wrb);
1243 	ctxt = &req->context;
1244 
1245 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1246 			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
1247 			       NULL);
1248 
1249 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1250 	if (BEx_chip(adapter)) {
1251 		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1252 		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1253 			      be_encoded_q_len(mccq->len));
1254 		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1255 	} else {
1256 		req->hdr.version = 1;
1257 		req->cq_id = cpu_to_le16(cq->id);
1258 
1259 		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
1260 			      be_encoded_q_len(mccq->len));
1261 		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
1262 		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
1263 			      ctxt, cq->id);
1264 		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
1265 			      ctxt, 1);
1266 	}
1267 
1268 	/* Subscribe to Link State, Sliport Event and Group 5 Events
1269 	 * (bits 1, 5 and 17 set)
1270 	 */
1271 	req->async_event_bitmap[0] =
1272 			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
1273 				    BIT(ASYNC_EVENT_CODE_GRP_5) |
1274 				    BIT(ASYNC_EVENT_CODE_QNQ) |
1275 				    BIT(ASYNC_EVENT_CODE_SLIPORT));
1276 
1277 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1278 
1279 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1280 
1281 	status = be_mbox_notify_wait(adapter);
1282 	if (!status) {
1283 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1284 
1285 		mccq->id = le16_to_cpu(resp->id);
1286 		mccq->created = true;
1287 	}
1288 	mutex_unlock(&adapter->mbox_lock);
1289 
1290 	return status;
1291 }
1292 
1293 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1294 				  struct be_queue_info *mccq,
1295 				  struct be_queue_info *cq)
1296 {
1297 	struct be_mcc_wrb *wrb;
1298 	struct be_cmd_req_mcc_create *req;
1299 	struct be_dma_mem *q_mem = &mccq->dma_mem;
1300 	void *ctxt;
1301 	int status;
1302 
1303 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1304 		return -1;
1305 
1306 	wrb = wrb_from_mbox(adapter);
1307 	req = embedded_payload(wrb);
1308 	ctxt = &req->context;
1309 
1310 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1311 			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
1312 			       NULL);
1313 
1314 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1315 
1316 	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1317 	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1318 		      be_encoded_q_len(mccq->len));
1319 	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1320 
1321 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
1322 
1323 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1324 
1325 	status = be_mbox_notify_wait(adapter);
1326 	if (!status) {
1327 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1328 
1329 		mccq->id = le16_to_cpu(resp->id);
1330 		mccq->created = true;
1331 	}
1332 
1333 	mutex_unlock(&adapter->mbox_lock);
1334 	return status;
1335 }
1336 
1337 int be_cmd_mccq_create(struct be_adapter *adapter,
1338 		       struct be_queue_info *mccq, struct be_queue_info *cq)
1339 {
1340 	int status;
1341 
1342 	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1343 	if (status && BEx_chip(adapter)) {
1344 		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1345 			"or newer to avoid conflicting priorities between NIC "
1346 			"and FCoE traffic");
1347 		status = be_cmd_mccq_org_create(adapter, mccq, cq);
1348 	}
1349 	return status;
1350 }
1351 
1352 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1353 {
1354 	struct be_mcc_wrb wrb = {0};
1355 	struct be_cmd_req_eth_tx_create *req;
1356 	struct be_queue_info *txq = &txo->q;
1357 	struct be_queue_info *cq = &txo->cq;
1358 	struct be_dma_mem *q_mem = &txq->dma_mem;
1359 	int status, ver = 0;
1360 
1361 	req = embedded_payload(&wrb);
1362 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1363 			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1364 
1365 	if (lancer_chip(adapter)) {
1366 		req->hdr.version = 1;
1367 	} else if (BEx_chip(adapter)) {
1368 		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1369 			req->hdr.version = 2;
1370 	} else { /* For SH */
1371 		req->hdr.version = 2;
1372 	}
1373 
1374 	if (req->hdr.version > 0)
1375 		req->if_id = cpu_to_le16(adapter->if_handle);
1376 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1377 	req->ulp_num = BE_ULP1_NUM;
1378 	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1379 	req->cq_id = cpu_to_le16(cq->id);
1380 	req->queue_size = be_encoded_q_len(txq->len);
1381 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1382 	ver = req->hdr.version;
1383 
1384 	status = be_cmd_notify_wait(adapter, &wrb);
1385 	if (!status) {
1386 		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1387 
1388 		txq->id = le16_to_cpu(resp->cid);
1389 		if (ver == 2)
1390 			txo->db_offset = le32_to_cpu(resp->db_offset);
1391 		else
1392 			txo->db_offset = DB_TXULP1_OFFSET;
1393 		txq->created = true;
1394 	}
1395 
1396 	return status;
1397 }
1398 
1399 /* Uses MCC */
1400 int be_cmd_rxq_create(struct be_adapter *adapter,
1401 		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1402 		      u32 if_id, u32 rss, u8 *rss_id)
1403 {
1404 	struct be_mcc_wrb *wrb;
1405 	struct be_cmd_req_eth_rx_create *req;
1406 	struct be_dma_mem *q_mem = &rxq->dma_mem;
1407 	int status;
1408 
1409 	spin_lock_bh(&adapter->mcc_lock);
1410 
1411 	wrb = wrb_from_mccq(adapter);
1412 	if (!wrb) {
1413 		status = -EBUSY;
1414 		goto err;
1415 	}
1416 	req = embedded_payload(wrb);
1417 
1418 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1419 			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1420 
1421 	req->cq_id = cpu_to_le16(cq_id);
1422 	req->frag_size = fls(frag_size) - 1;
1423 	req->num_pages = 2;
1424 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1425 	req->interface_id = cpu_to_le32(if_id);
1426 	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1427 	req->rss_queue = cpu_to_le32(rss);
1428 
1429 	status = be_mcc_notify_wait(adapter);
1430 	if (!status) {
1431 		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1432 
1433 		rxq->id = le16_to_cpu(resp->id);
1434 		rxq->created = true;
1435 		*rss_id = resp->rss_id;
1436 	}
1437 
1438 err:
1439 	spin_unlock_bh(&adapter->mcc_lock);
1440 	return status;
1441 }
1442 
1443 /* Generic destroyer function for all types of queues
1444  * Uses Mbox
1445  */
1446 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1447 		     int queue_type)
1448 {
1449 	struct be_mcc_wrb *wrb;
1450 	struct be_cmd_req_q_destroy *req;
1451 	u8 subsys = 0, opcode = 0;
1452 	int status;
1453 
1454 	if (mutex_lock_interruptible(&adapter->mbox_lock))
1455 		return -1;
1456 
1457 	wrb = wrb_from_mbox(adapter);
1458 	req = embedded_payload(wrb);
1459 
1460 	switch (queue_type) {
1461 	case QTYPE_EQ:
1462 		subsys = CMD_SUBSYSTEM_COMMON;
1463 		opcode = OPCODE_COMMON_EQ_DESTROY;
1464 		break;
1465 	case QTYPE_CQ:
1466 		subsys = CMD_SUBSYSTEM_COMMON;
1467 		opcode = OPCODE_COMMON_CQ_DESTROY;
1468 		break;
1469 	case QTYPE_TXQ:
1470 		subsys = CMD_SUBSYSTEM_ETH;
1471 		opcode = OPCODE_ETH_TX_DESTROY;
1472 		break;
1473 	case QTYPE_RXQ:
1474 		subsys = CMD_SUBSYSTEM_ETH;
1475 		opcode = OPCODE_ETH_RX_DESTROY;
1476 		break;
1477 	case QTYPE_MCCQ:
1478 		subsys = CMD_SUBSYSTEM_COMMON;
1479 		opcode = OPCODE_COMMON_MCC_DESTROY;
1480 		break;
1481 	default:
1482 		BUG();
1483 	}
1484 
1485 	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1486 			       NULL);
1487 	req->id = cpu_to_le16(q->id);
1488 
1489 	status = be_mbox_notify_wait(adapter);
1490 	q->created = false;
1491 
1492 	mutex_unlock(&adapter->mbox_lock);
1493 	return status;
1494 }
1495 
1496 /* Uses MCC */
1497 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1498 {
1499 	struct be_mcc_wrb *wrb;
1500 	struct be_cmd_req_q_destroy *req;
1501 	int status;
1502 
1503 	spin_lock_bh(&adapter->mcc_lock);
1504 
1505 	wrb = wrb_from_mccq(adapter);
1506 	if (!wrb) {
1507 		status = -EBUSY;
1508 		goto err;
1509 	}
1510 	req = embedded_payload(wrb);
1511 
1512 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1513 			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1514 	req->id = cpu_to_le16(q->id);
1515 
1516 	status = be_mcc_notify_wait(adapter);
1517 	q->created = false;
1518 
1519 err:
1520 	spin_unlock_bh(&adapter->mcc_lock);
1521 	return status;
1522 }
1523 
1524 /* Create an rx filtering policy configuration on an i/f
1525  * Will use MBOX only if MCCQ has not been created.
1526  */
1527 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1528 		     u32 *if_handle, u32 domain)
1529 {
1530 	struct be_mcc_wrb wrb = {0};
1531 	struct be_cmd_req_if_create *req;
1532 	int status;
1533 
1534 	req = embedded_payload(&wrb);
1535 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1536 			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
1537 			       sizeof(*req), &wrb, NULL);
1538 	req->hdr.domain = domain;
1539 	req->capability_flags = cpu_to_le32(cap_flags);
1540 	req->enable_flags = cpu_to_le32(en_flags);
1541 	req->pmac_invalid = true;
1542 
1543 	status = be_cmd_notify_wait(adapter, &wrb);
1544 	if (!status) {
1545 		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1546 
1547 		*if_handle = le32_to_cpu(resp->interface_id);
1548 
1549 		/* Hack to retrieve VF's pmac-id on BE3 */
1550 		if (BE3_chip(adapter) && be_virtfn(adapter))
1551 			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1552 	}
1553 	return status;
1554 }
1555 
1556 /* Uses MCCQ if available else MBOX */
1557 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1558 {
1559 	struct be_mcc_wrb wrb = {0};
1560 	struct be_cmd_req_if_destroy *req;
1561 	int status;
1562 
1563 	if (interface_id == -1)
1564 		return 0;
1565 
1566 	req = embedded_payload(&wrb);
1567 
1568 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1569 			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
1570 			       sizeof(*req), &wrb, NULL);
1571 	req->hdr.domain = domain;
1572 	req->interface_id = cpu_to_le32(interface_id);
1573 
1574 	status = be_cmd_notify_wait(adapter, &wrb);
1575 	return status;
1576 }
1577 
1578 /* Get stats is a non embedded command: the request is not embedded inside
1579  * WRB but is a separate dma memory block
1580  * Uses asynchronous MCC
1581  */
1582 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1583 {
1584 	struct be_mcc_wrb *wrb;
1585 	struct be_cmd_req_hdr *hdr;
1586 	int status = 0;
1587 
1588 	spin_lock_bh(&adapter->mcc_lock);
1589 
1590 	wrb = wrb_from_mccq(adapter);
1591 	if (!wrb) {
1592 		status = -EBUSY;
1593 		goto err;
1594 	}
1595 	hdr = nonemb_cmd->va;
1596 
1597 	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1598 			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
1599 			       nonemb_cmd);
1600 
1601 	/* version 1 of the cmd is not supported only by BE2 */
1602 	if (BE2_chip(adapter))
1603 		hdr->version = 0;
1604 	if (BE3_chip(adapter) || lancer_chip(adapter))
1605 		hdr->version = 1;
1606 	else
1607 		hdr->version = 2;
1608 
1609 	status = be_mcc_notify(adapter);
1610 	if (status)
1611 		goto err;
1612 
1613 	adapter->stats_cmd_sent = true;
1614 
1615 err:
1616 	spin_unlock_bh(&adapter->mcc_lock);
1617 	return status;
1618 }
1619 
1620 /* Lancer Stats */
1621 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1622 			       struct be_dma_mem *nonemb_cmd)
1623 {
1624 	struct be_mcc_wrb *wrb;
1625 	struct lancer_cmd_req_pport_stats *req;
1626 	int status = 0;
1627 
1628 	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1629 			    CMD_SUBSYSTEM_ETH))
1630 		return -EPERM;
1631 
1632 	spin_lock_bh(&adapter->mcc_lock);
1633 
1634 	wrb = wrb_from_mccq(adapter);
1635 	if (!wrb) {
1636 		status = -EBUSY;
1637 		goto err;
1638 	}
1639 	req = nonemb_cmd->va;
1640 
1641 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1642 			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
1643 			       wrb, nonemb_cmd);
1644 
1645 	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1646 	req->cmd_params.params.reset_stats = 0;
1647 
1648 	status = be_mcc_notify(adapter);
1649 	if (status)
1650 		goto err;
1651 
1652 	adapter->stats_cmd_sent = true;
1653 
1654 err:
1655 	spin_unlock_bh(&adapter->mcc_lock);
1656 	return status;
1657 }
1658 
1659 static int be_mac_to_link_speed(int mac_speed)
1660 {
1661 	switch (mac_speed) {
1662 	case PHY_LINK_SPEED_ZERO:
1663 		return 0;
1664 	case PHY_LINK_SPEED_10MBPS:
1665 		return 10;
1666 	case PHY_LINK_SPEED_100MBPS:
1667 		return 100;
1668 	case PHY_LINK_SPEED_1GBPS:
1669 		return 1000;
1670 	case PHY_LINK_SPEED_10GBPS:
1671 		return 10000;
1672 	case PHY_LINK_SPEED_20GBPS:
1673 		return 20000;
1674 	case PHY_LINK_SPEED_25GBPS:
1675 		return 25000;
1676 	case PHY_LINK_SPEED_40GBPS:
1677 		return 40000;
1678 	}
1679 	return 0;
1680 }
1681 
1682 /* Uses synchronous mcc
1683  * Returns link_speed in Mbps
1684  */
1685 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1686 			     u8 *link_status, u32 dom)
1687 {
1688 	struct be_mcc_wrb *wrb;
1689 	struct be_cmd_req_link_status *req;
1690 	int status;
1691 
1692 	spin_lock_bh(&adapter->mcc_lock);
1693 
1694 	if (link_status)
1695 		*link_status = LINK_DOWN;
1696 
1697 	wrb = wrb_from_mccq(adapter);
1698 	if (!wrb) {
1699 		status = -EBUSY;
1700 		goto err;
1701 	}
1702 	req = embedded_payload(wrb);
1703 
1704 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1705 			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
1706 			       sizeof(*req), wrb, NULL);
1707 
1708 	/* version 1 of the cmd is not supported only by BE2 */
1709 	if (!BE2_chip(adapter))
1710 		req->hdr.version = 1;
1711 
1712 	req->hdr.domain = dom;
1713 
1714 	status = be_mcc_notify_wait(adapter);
1715 	if (!status) {
1716 		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1717 
1718 		if (link_speed) {
1719 			*link_speed = resp->link_speed ?
1720 				      le16_to_cpu(resp->link_speed) * 10 :
1721 				      be_mac_to_link_speed(resp->mac_speed);
1722 
1723 			if (!resp->logical_link_status)
1724 				*link_speed = 0;
1725 		}
1726 		if (link_status)
1727 			*link_status = resp->logical_link_status;
1728 	}
1729 
1730 err:
1731 	spin_unlock_bh(&adapter->mcc_lock);
1732 	return status;
1733 }
1734 
1735 /* Uses synchronous mcc */
1736 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1737 {
1738 	struct be_mcc_wrb *wrb;
1739 	struct be_cmd_req_get_cntl_addnl_attribs *req;
1740 	int status = 0;
1741 
1742 	spin_lock_bh(&adapter->mcc_lock);
1743 
1744 	wrb = wrb_from_mccq(adapter);
1745 	if (!wrb) {
1746 		status = -EBUSY;
1747 		goto err;
1748 	}
1749 	req = embedded_payload(wrb);
1750 
1751 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1752 			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
1753 			       sizeof(*req), wrb, NULL);
1754 
1755 	status = be_mcc_notify(adapter);
1756 err:
1757 	spin_unlock_bh(&adapter->mcc_lock);
1758 	return status;
1759 }
1760 
1761 /* Uses synchronous mcc */
1762 int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
1763 {
1764 	struct be_mcc_wrb wrb = {0};
1765 	struct be_cmd_req_get_fat *req;
1766 	int status;
1767 
1768 	req = embedded_payload(&wrb);
1769 
1770 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1771 			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
1772 			       &wrb, NULL);
1773 	req->fat_operation = cpu_to_le32(QUERY_FAT);
1774 	status = be_cmd_notify_wait(adapter, &wrb);
1775 	if (!status) {
1776 		struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);
1777 
1778 		if (dump_size && resp->log_size)
1779 			*dump_size = le32_to_cpu(resp->log_size) -
1780 					sizeof(u32);
1781 	}
1782 	return status;
1783 }
1784 
1785 int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
1786 {
1787 	struct be_dma_mem get_fat_cmd;
1788 	struct be_mcc_wrb *wrb;
1789 	struct be_cmd_req_get_fat *req;
1790 	u32 offset = 0, total_size, buf_size,
1791 				log_offset = sizeof(u32), payload_len;
1792 	int status;
1793 
1794 	if (buf_len == 0)
1795 		return 0;
1796 
1797 	total_size = buf_len;
1798 
1799 	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1800 	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
1801 					     get_fat_cmd.size,
1802 					     &get_fat_cmd.dma, GFP_ATOMIC);
1803 	if (!get_fat_cmd.va)
1804 		return -ENOMEM;
1805 
1806 	spin_lock_bh(&adapter->mcc_lock);
1807 
1808 	while (total_size) {
1809 		buf_size = min(total_size, (u32)60*1024);
1810 		total_size -= buf_size;
1811 
1812 		wrb = wrb_from_mccq(adapter);
1813 		if (!wrb) {
1814 			status = -EBUSY;
1815 			goto err;
1816 		}
1817 		req = get_fat_cmd.va;
1818 
1819 		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1820 		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1821 				       OPCODE_COMMON_MANAGE_FAT, payload_len,
1822 				       wrb, &get_fat_cmd);
1823 
1824 		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1825 		req->read_log_offset = cpu_to_le32(log_offset);
1826 		req->read_log_length = cpu_to_le32(buf_size);
1827 		req->data_buffer_size = cpu_to_le32(buf_size);
1828 
1829 		status = be_mcc_notify_wait(adapter);
1830 		if (!status) {
1831 			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1832 
1833 			memcpy(buf + offset,
1834 			       resp->data_buffer,
1835 			       le32_to_cpu(resp->read_log_length));
1836 		} else {
1837 			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1838 			goto err;
1839 		}
1840 		offset += buf_size;
1841 		log_offset += buf_size;
1842 	}
1843 err:
1844 	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
1845 			  get_fat_cmd.va, get_fat_cmd.dma);
1846 	spin_unlock_bh(&adapter->mcc_lock);
1847 	return status;
1848 }
1849 
1850 /* Uses synchronous mcc */
1851 int be_cmd_get_fw_ver(struct be_adapter *adapter)
1852 {
1853 	struct be_mcc_wrb *wrb;
1854 	struct be_cmd_req_get_fw_version *req;
1855 	int status;
1856 
1857 	spin_lock_bh(&adapter->mcc_lock);
1858 
1859 	wrb = wrb_from_mccq(adapter);
1860 	if (!wrb) {
1861 		status = -EBUSY;
1862 		goto err;
1863 	}
1864 
1865 	req = embedded_payload(wrb);
1866 
1867 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1868 			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
1869 			       NULL);
1870 	status = be_mcc_notify_wait(adapter);
1871 	if (!status) {
1872 		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1873 
1874 		strlcpy(adapter->fw_ver, resp->firmware_version_string,
1875 			sizeof(adapter->fw_ver));
1876 		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
1877 			sizeof(adapter->fw_on_flash));
1878 	}
1879 err:
1880 	spin_unlock_bh(&adapter->mcc_lock);
1881 	return status;
1882 }
1883 
1884 /* set the EQ delay interval of an EQ to specified value
1885  * Uses async mcc
1886  */
1887 static int __be_cmd_modify_eqd(struct be_adapter *adapter,
1888 			       struct be_set_eqd *set_eqd, int num)
1889 {
1890 	struct be_mcc_wrb *wrb;
1891 	struct be_cmd_req_modify_eq_delay *req;
1892 	int status = 0, i;
1893 
1894 	spin_lock_bh(&adapter->mcc_lock);
1895 
1896 	wrb = wrb_from_mccq(adapter);
1897 	if (!wrb) {
1898 		status = -EBUSY;
1899 		goto err;
1900 	}
1901 	req = embedded_payload(wrb);
1902 
1903 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1904 			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
1905 			       NULL);
1906 
1907 	req->num_eq = cpu_to_le32(num);
1908 	for (i = 0; i < num; i++) {
1909 		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
1910 		req->set_eqd[i].phase = 0;
1911 		req->set_eqd[i].delay_multiplier =
1912 				cpu_to_le32(set_eqd[i].delay_multiplier);
1913 	}
1914 
1915 	status = be_mcc_notify(adapter);
1916 err:
1917 	spin_unlock_bh(&adapter->mcc_lock);
1918 	return status;
1919 }
1920 
1921 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
1922 		      int num)
1923 {
1924 	int num_eqs, i = 0;
1925 
1926 	while (num) {
1927 		num_eqs = min(num, 8);
1928 		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
1929 		i += num_eqs;
1930 		num -= num_eqs;
1931 	}
1932 
1933 	return 0;
1934 }
1935 
1936 /* Uses sycnhronous mcc */
1937 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1938 		       u32 num, u32 domain)
1939 {
1940 	struct be_mcc_wrb *wrb;
1941 	struct be_cmd_req_vlan_config *req;
1942 	int status;
1943 
1944 	spin_lock_bh(&adapter->mcc_lock);
1945 
1946 	wrb = wrb_from_mccq(adapter);
1947 	if (!wrb) {
1948 		status = -EBUSY;
1949 		goto err;
1950 	}
1951 	req = embedded_payload(wrb);
1952 
1953 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1954 			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
1955 			       wrb, NULL);
1956 	req->hdr.domain = domain;
1957 
1958 	req->interface_id = if_id;
1959 	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
1960 	req->num_vlan = num;
1961 	memcpy(req->normal_vlan, vtag_array,
1962 	       req->num_vlan * sizeof(vtag_array[0]));
1963 
1964 	status = be_mcc_notify_wait(adapter);
1965 err:
1966 	spin_unlock_bh(&adapter->mcc_lock);
1967 	return status;
1968 }
1969 
1970 static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1971 {
1972 	struct be_mcc_wrb *wrb;
1973 	struct be_dma_mem *mem = &adapter->rx_filter;
1974 	struct be_cmd_req_rx_filter *req = mem->va;
1975 	int status;
1976 
1977 	spin_lock_bh(&adapter->mcc_lock);
1978 
1979 	wrb = wrb_from_mccq(adapter);
1980 	if (!wrb) {
1981 		status = -EBUSY;
1982 		goto err;
1983 	}
1984 	memset(req, 0, sizeof(*req));
1985 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1986 			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1987 			       wrb, mem);
1988 
1989 	req->if_id = cpu_to_le32(adapter->if_handle);
1990 	req->if_flags_mask = cpu_to_le32(flags);
1991 	req->if_flags = (value == ON) ? req->if_flags_mask : 0;
1992 
1993 	if (flags & BE_IF_FLAGS_MULTICAST) {
1994 		struct netdev_hw_addr *ha;
1995 		int i = 0;
1996 
1997 		/* Reset mcast promisc mode if already set by setting mask
1998 		 * and not setting flags field
1999 		 */
2000 		req->if_flags_mask |=
2001 			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
2002 				    be_if_cap_flags(adapter));
2003 		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
2004 		netdev_for_each_mc_addr(ha, adapter->netdev)
2005 			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
2006 	}
2007 
2008 	status = be_mcc_notify_wait(adapter);
2009 err:
2010 	spin_unlock_bh(&adapter->mcc_lock);
2011 	return status;
2012 }
2013 
2014 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
2015 {
2016 	struct device *dev = &adapter->pdev->dev;
2017 
2018 	if ((flags & be_if_cap_flags(adapter)) != flags) {
2019 		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
2020 		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
2021 			 be_if_cap_flags(adapter));
2022 	}
2023 	flags &= be_if_cap_flags(adapter);
2024 	if (!flags)
2025 		return -ENOTSUPP;
2026 
2027 	return __be_cmd_rx_filter(adapter, flags, value);
2028 }
2029 
2030 /* Uses synchrounous mcc */
2031 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
2032 {
2033 	struct be_mcc_wrb *wrb;
2034 	struct be_cmd_req_set_flow_control *req;
2035 	int status;
2036 
2037 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
2038 			    CMD_SUBSYSTEM_COMMON))
2039 		return -EPERM;
2040 
2041 	spin_lock_bh(&adapter->mcc_lock);
2042 
2043 	wrb = wrb_from_mccq(adapter);
2044 	if (!wrb) {
2045 		status = -EBUSY;
2046 		goto err;
2047 	}
2048 	req = embedded_payload(wrb);
2049 
2050 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2051 			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
2052 			       wrb, NULL);
2053 
2054 	req->hdr.version = 1;
2055 	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
2056 	req->rx_flow_control = cpu_to_le16((u16)rx_fc);
2057 
2058 	status = be_mcc_notify_wait(adapter);
2059 
2060 err:
2061 	spin_unlock_bh(&adapter->mcc_lock);
2062 
2063 	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
2064 		return  -EOPNOTSUPP;
2065 
2066 	return status;
2067 }
2068 
2069 /* Uses sycn mcc */
2070 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
2071 {
2072 	struct be_mcc_wrb *wrb;
2073 	struct be_cmd_req_get_flow_control *req;
2074 	int status;
2075 
2076 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
2077 			    CMD_SUBSYSTEM_COMMON))
2078 		return -EPERM;
2079 
2080 	spin_lock_bh(&adapter->mcc_lock);
2081 
2082 	wrb = wrb_from_mccq(adapter);
2083 	if (!wrb) {
2084 		status = -EBUSY;
2085 		goto err;
2086 	}
2087 	req = embedded_payload(wrb);
2088 
2089 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2090 			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
2091 			       wrb, NULL);
2092 
2093 	status = be_mcc_notify_wait(adapter);
2094 	if (!status) {
2095 		struct be_cmd_resp_get_flow_control *resp =
2096 						embedded_payload(wrb);
2097 
2098 		*tx_fc = le16_to_cpu(resp->tx_flow_control);
2099 		*rx_fc = le16_to_cpu(resp->rx_flow_control);
2100 	}
2101 
2102 err:
2103 	spin_unlock_bh(&adapter->mcc_lock);
2104 	return status;
2105 }
2106 
2107 /* Uses mbox */
2108 int be_cmd_query_fw_cfg(struct be_adapter *adapter)
2109 {
2110 	struct be_mcc_wrb *wrb;
2111 	struct be_cmd_req_query_fw_cfg *req;
2112 	int status;
2113 
2114 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2115 		return -1;
2116 
2117 	wrb = wrb_from_mbox(adapter);
2118 	req = embedded_payload(wrb);
2119 
2120 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2121 			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
2122 			       sizeof(*req), wrb, NULL);
2123 
2124 	status = be_mbox_notify_wait(adapter);
2125 	if (!status) {
2126 		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
2127 
2128 		adapter->port_num = le32_to_cpu(resp->phys_port);
2129 		adapter->function_mode = le32_to_cpu(resp->function_mode);
2130 		adapter->function_caps = le32_to_cpu(resp->function_caps);
2131 		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
2132 		dev_info(&adapter->pdev->dev,
2133 			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
2134 			 adapter->function_mode, adapter->function_caps);
2135 	}
2136 
2137 	mutex_unlock(&adapter->mbox_lock);
2138 	return status;
2139 }
2140 
2141 /* Uses mbox */
2142 int be_cmd_reset_function(struct be_adapter *adapter)
2143 {
2144 	struct be_mcc_wrb *wrb;
2145 	struct be_cmd_req_hdr *req;
2146 	int status;
2147 
2148 	if (lancer_chip(adapter)) {
2149 		iowrite32(SLI_PORT_CONTROL_IP_MASK,
2150 			  adapter->db + SLIPORT_CONTROL_OFFSET);
2151 		status = lancer_wait_ready(adapter);
2152 		if (status)
2153 			dev_err(&adapter->pdev->dev,
2154 				"Adapter in non recoverable error\n");
2155 		return status;
2156 	}
2157 
2158 	if (mutex_lock_interruptible(&adapter->mbox_lock))
2159 		return -1;
2160 
2161 	wrb = wrb_from_mbox(adapter);
2162 	req = embedded_payload(wrb);
2163 
2164 	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
2165 			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
2166 			       NULL);
2167 
2168 	status = be_mbox_notify_wait(adapter);
2169 
2170 	mutex_unlock(&adapter->mbox_lock);
2171 	return status;
2172 }
2173 
2174 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
2175 		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
2176 {
2177 	struct be_mcc_wrb *wrb;
2178 	struct be_cmd_req_rss_config *req;
2179 	int status;
2180 
2181 	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
2182 		return 0;
2183 
2184 	spin_lock_bh(&adapter->mcc_lock);
2185 
2186 	wrb = wrb_from_mccq(adapter);
2187 	if (!wrb) {
2188 		status = -EBUSY;
2189 		goto err;
2190 	}
2191 	req = embedded_payload(wrb);
2192 
2193 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2194 			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2195 
2196 	req->if_id = cpu_to_le32(adapter->if_handle);
2197 	req->enable_rss = cpu_to_le16(rss_hash_opts);
2198 	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2199 
2200 	if (!BEx_chip(adapter))
2201 		req->hdr.version = 1;
2202 
2203 	memcpy(req->cpu_table, rsstable, table_size);
2204 	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
2205 	be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2206 
2207 	status = be_mcc_notify_wait(adapter);
2208 err:
2209 	spin_unlock_bh(&adapter->mcc_lock);
2210 	return status;
2211 }
2212 
2213 /* Uses sync mcc */
2214 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2215 			    u8 bcn, u8 sts, u8 state)
2216 {
2217 	struct be_mcc_wrb *wrb;
2218 	struct be_cmd_req_enable_disable_beacon *req;
2219 	int status;
2220 
2221 	spin_lock_bh(&adapter->mcc_lock);
2222 
2223 	wrb = wrb_from_mccq(adapter);
2224 	if (!wrb) {
2225 		status = -EBUSY;
2226 		goto err;
2227 	}
2228 	req = embedded_payload(wrb);
2229 
2230 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2231 			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
2232 			       sizeof(*req), wrb, NULL);
2233 
2234 	req->port_num = port_num;
2235 	req->beacon_state = state;
2236 	req->beacon_duration = bcn;
2237 	req->status_duration = sts;
2238 
2239 	status = be_mcc_notify_wait(adapter);
2240 
2241 err:
2242 	spin_unlock_bh(&adapter->mcc_lock);
2243 	return status;
2244 }
2245 
2246 /* Uses sync mcc */
2247 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2248 {
2249 	struct be_mcc_wrb *wrb;
2250 	struct be_cmd_req_get_beacon_state *req;
2251 	int status;
2252 
2253 	spin_lock_bh(&adapter->mcc_lock);
2254 
2255 	wrb = wrb_from_mccq(adapter);
2256 	if (!wrb) {
2257 		status = -EBUSY;
2258 		goto err;
2259 	}
2260 	req = embedded_payload(wrb);
2261 
2262 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2263 			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
2264 			       wrb, NULL);
2265 
2266 	req->port_num = port_num;
2267 
2268 	status = be_mcc_notify_wait(adapter);
2269 	if (!status) {
2270 		struct be_cmd_resp_get_beacon_state *resp =
2271 						embedded_payload(wrb);
2272 
2273 		*state = resp->beacon_state;
2274 	}
2275 
2276 err:
2277 	spin_unlock_bh(&adapter->mcc_lock);
2278 	return status;
2279 }
2280 
2281 /* Uses sync mcc */
2282 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
2283 				      u8 page_num, u8 *data)
2284 {
2285 	struct be_dma_mem cmd;
2286 	struct be_mcc_wrb *wrb;
2287 	struct be_cmd_req_port_type *req;
2288 	int status;
2289 
2290 	if (page_num > TR_PAGE_A2)
2291 		return -EINVAL;
2292 
2293 	cmd.size = sizeof(struct be_cmd_resp_port_type);
2294 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2295 				     GFP_ATOMIC);
2296 	if (!cmd.va) {
2297 		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
2298 		return -ENOMEM;
2299 	}
2300 
2301 	spin_lock_bh(&adapter->mcc_lock);
2302 
2303 	wrb = wrb_from_mccq(adapter);
2304 	if (!wrb) {
2305 		status = -EBUSY;
2306 		goto err;
2307 	}
2308 	req = cmd.va;
2309 
2310 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2311 			       OPCODE_COMMON_READ_TRANSRECV_DATA,
2312 			       cmd.size, wrb, &cmd);
2313 
2314 	req->port = cpu_to_le32(adapter->hba_port_num);
2315 	req->page_num = cpu_to_le32(page_num);
2316 	status = be_mcc_notify_wait(adapter);
2317 	if (!status) {
2318 		struct be_cmd_resp_port_type *resp = cmd.va;
2319 
2320 		memcpy(data, resp->page_data, PAGE_DATA_LEN);
2321 	}
2322 err:
2323 	spin_unlock_bh(&adapter->mcc_lock);
2324 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2325 	return status;
2326 }
2327 
2328 static int lancer_cmd_write_object(struct be_adapter *adapter,
2329 				   struct be_dma_mem *cmd, u32 data_size,
2330 				   u32 data_offset, const char *obj_name,
2331 				   u32 *data_written, u8 *change_status,
2332 				   u8 *addn_status)
2333 {
2334 	struct be_mcc_wrb *wrb;
2335 	struct lancer_cmd_req_write_object *req;
2336 	struct lancer_cmd_resp_write_object *resp;
2337 	void *ctxt = NULL;
2338 	int status;
2339 
2340 	spin_lock_bh(&adapter->mcc_lock);
2341 	adapter->flash_status = 0;
2342 
2343 	wrb = wrb_from_mccq(adapter);
2344 	if (!wrb) {
2345 		status = -EBUSY;
2346 		goto err_unlock;
2347 	}
2348 
2349 	req = embedded_payload(wrb);
2350 
2351 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2352 			       OPCODE_COMMON_WRITE_OBJECT,
2353 			       sizeof(struct lancer_cmd_req_write_object), wrb,
2354 			       NULL);
2355 
2356 	ctxt = &req->context;
2357 	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2358 		      write_length, ctxt, data_size);
2359 
2360 	if (data_size == 0)
2361 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2362 			      eof, ctxt, 1);
2363 	else
2364 		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2365 			      eof, ctxt, 0);
2366 
2367 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
2368 	req->write_offset = cpu_to_le32(data_offset);
2369 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2370 	req->descriptor_count = cpu_to_le32(1);
2371 	req->buf_len = cpu_to_le32(data_size);
2372 	req->addr_low = cpu_to_le32((cmd->dma +
2373 				     sizeof(struct lancer_cmd_req_write_object))
2374 				    & 0xFFFFFFFF);
2375 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2376 				sizeof(struct lancer_cmd_req_write_object)));
2377 
2378 	status = be_mcc_notify(adapter);
2379 	if (status)
2380 		goto err_unlock;
2381 
2382 	spin_unlock_bh(&adapter->mcc_lock);
2383 
2384 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2385 					 msecs_to_jiffies(60000)))
2386 		status = -ETIMEDOUT;
2387 	else
2388 		status = adapter->flash_status;
2389 
2390 	resp = embedded_payload(wrb);
2391 	if (!status) {
2392 		*data_written = le32_to_cpu(resp->actual_write_len);
2393 		*change_status = resp->change_status;
2394 	} else {
2395 		*addn_status = resp->additional_status;
2396 	}
2397 
2398 	return status;
2399 
2400 err_unlock:
2401 	spin_unlock_bh(&adapter->mcc_lock);
2402 	return status;
2403 }
2404 
2405 int be_cmd_query_cable_type(struct be_adapter *adapter)
2406 {
2407 	u8 page_data[PAGE_DATA_LEN];
2408 	int status;
2409 
2410 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2411 						   page_data);
2412 	if (!status) {
2413 		switch (adapter->phy.interface_type) {
2414 		case PHY_TYPE_QSFP:
2415 			adapter->phy.cable_type =
2416 				page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
2417 			break;
2418 		case PHY_TYPE_SFP_PLUS_10GB:
2419 			adapter->phy.cable_type =
2420 				page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
2421 			break;
2422 		default:
2423 			adapter->phy.cable_type = 0;
2424 			break;
2425 		}
2426 	}
2427 	return status;
2428 }
2429 
2430 int be_cmd_query_sfp_info(struct be_adapter *adapter)
2431 {
2432 	u8 page_data[PAGE_DATA_LEN];
2433 	int status;
2434 
2435 	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
2436 						   page_data);
2437 	if (!status) {
2438 		strlcpy(adapter->phy.vendor_name, page_data +
2439 			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
2440 		strlcpy(adapter->phy.vendor_pn,
2441 			page_data + SFP_VENDOR_PN_OFFSET,
2442 			SFP_VENDOR_NAME_LEN - 1);
2443 	}
2444 
2445 	return status;
2446 }
2447 
2448 static int lancer_cmd_delete_object(struct be_adapter *adapter,
2449 				    const char *obj_name)
2450 {
2451 	struct lancer_cmd_req_delete_object *req;
2452 	struct be_mcc_wrb *wrb;
2453 	int status;
2454 
2455 	spin_lock_bh(&adapter->mcc_lock);
2456 
2457 	wrb = wrb_from_mccq(adapter);
2458 	if (!wrb) {
2459 		status = -EBUSY;
2460 		goto err;
2461 	}
2462 
2463 	req = embedded_payload(wrb);
2464 
2465 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2466 			       OPCODE_COMMON_DELETE_OBJECT,
2467 			       sizeof(*req), wrb, NULL);
2468 
2469 	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
2470 
2471 	status = be_mcc_notify_wait(adapter);
2472 err:
2473 	spin_unlock_bh(&adapter->mcc_lock);
2474 	return status;
2475 }
2476 
2477 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2478 			   u32 data_size, u32 data_offset, const char *obj_name,
2479 			   u32 *data_read, u32 *eof, u8 *addn_status)
2480 {
2481 	struct be_mcc_wrb *wrb;
2482 	struct lancer_cmd_req_read_object *req;
2483 	struct lancer_cmd_resp_read_object *resp;
2484 	int status;
2485 
2486 	spin_lock_bh(&adapter->mcc_lock);
2487 
2488 	wrb = wrb_from_mccq(adapter);
2489 	if (!wrb) {
2490 		status = -EBUSY;
2491 		goto err_unlock;
2492 	}
2493 
2494 	req = embedded_payload(wrb);
2495 
2496 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2497 			       OPCODE_COMMON_READ_OBJECT,
2498 			       sizeof(struct lancer_cmd_req_read_object), wrb,
2499 			       NULL);
2500 
2501 	req->desired_read_len = cpu_to_le32(data_size);
2502 	req->read_offset = cpu_to_le32(data_offset);
2503 	strcpy(req->object_name, obj_name);
2504 	req->descriptor_count = cpu_to_le32(1);
2505 	req->buf_len = cpu_to_le32(data_size);
2506 	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2507 	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2508 
2509 	status = be_mcc_notify_wait(adapter);
2510 
2511 	resp = embedded_payload(wrb);
2512 	if (!status) {
2513 		*data_read = le32_to_cpu(resp->actual_read_len);
2514 		*eof = le32_to_cpu(resp->eof);
2515 	} else {
2516 		*addn_status = resp->additional_status;
2517 	}
2518 
2519 err_unlock:
2520 	spin_unlock_bh(&adapter->mcc_lock);
2521 	return status;
2522 }
2523 
2524 static int be_cmd_write_flashrom(struct be_adapter *adapter,
2525 				 struct be_dma_mem *cmd, u32 flash_type,
2526 				 u32 flash_opcode, u32 img_offset, u32 buf_size)
2527 {
2528 	struct be_mcc_wrb *wrb;
2529 	struct be_cmd_write_flashrom *req;
2530 	int status;
2531 
2532 	spin_lock_bh(&adapter->mcc_lock);
2533 	adapter->flash_status = 0;
2534 
2535 	wrb = wrb_from_mccq(adapter);
2536 	if (!wrb) {
2537 		status = -EBUSY;
2538 		goto err_unlock;
2539 	}
2540 	req = cmd->va;
2541 
2542 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2543 			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
2544 			       cmd);
2545 
2546 	req->params.op_type = cpu_to_le32(flash_type);
2547 	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
2548 		req->params.offset = cpu_to_le32(img_offset);
2549 
2550 	req->params.op_code = cpu_to_le32(flash_opcode);
2551 	req->params.data_buf_size = cpu_to_le32(buf_size);
2552 
2553 	status = be_mcc_notify(adapter);
2554 	if (status)
2555 		goto err_unlock;
2556 
2557 	spin_unlock_bh(&adapter->mcc_lock);
2558 
2559 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
2560 					 msecs_to_jiffies(40000)))
2561 		status = -ETIMEDOUT;
2562 	else
2563 		status = adapter->flash_status;
2564 
2565 	return status;
2566 
2567 err_unlock:
2568 	spin_unlock_bh(&adapter->mcc_lock);
2569 	return status;
2570 }
2571 
2572 static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2573 				u16 img_optype, u32 img_offset, u32 crc_offset)
2574 {
2575 	struct be_cmd_read_flash_crc *req;
2576 	struct be_mcc_wrb *wrb;
2577 	int status;
2578 
2579 	spin_lock_bh(&adapter->mcc_lock);
2580 
2581 	wrb = wrb_from_mccq(adapter);
2582 	if (!wrb) {
2583 		status = -EBUSY;
2584 		goto err;
2585 	}
2586 	req = embedded_payload(wrb);
2587 
2588 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2589 			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2590 			       wrb, NULL);
2591 
2592 	req->params.op_type = cpu_to_le32(img_optype);
2593 	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
2594 		req->params.offset = cpu_to_le32(img_offset + crc_offset);
2595 	else
2596 		req->params.offset = cpu_to_le32(crc_offset);
2597 
2598 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2599 	req->params.data_buf_size = cpu_to_le32(0x4);
2600 
2601 	status = be_mcc_notify_wait(adapter);
2602 	if (!status)
2603 		memcpy(flashed_crc, req->crc, 4);
2604 
2605 err:
2606 	spin_unlock_bh(&adapter->mcc_lock);
2607 	return status;
2608 }
2609 
2610 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2611 
2612 static bool phy_flashing_required(struct be_adapter *adapter)
2613 {
2614 	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
2615 		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2616 }
2617 
2618 static bool is_comp_in_ufi(struct be_adapter *adapter,
2619 			   struct flash_section_info *fsec, int type)
2620 {
2621 	int i = 0, img_type = 0;
2622 	struct flash_section_info_g2 *fsec_g2 = NULL;
2623 
2624 	if (BE2_chip(adapter))
2625 		fsec_g2 = (struct flash_section_info_g2 *)fsec;
2626 
2627 	for (i = 0; i < MAX_FLASH_COMP; i++) {
2628 		if (fsec_g2)
2629 			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2630 		else
2631 			img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2632 
2633 		if (img_type == type)
2634 			return true;
2635 	}
2636 	return false;
2637 }
2638 
2639 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2640 						int header_size,
2641 						const struct firmware *fw)
2642 {
2643 	struct flash_section_info *fsec = NULL;
2644 	const u8 *p = fw->data;
2645 
2646 	p += header_size;
2647 	while (p < (fw->data + fw->size)) {
2648 		fsec = (struct flash_section_info *)p;
2649 		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2650 			return fsec;
2651 		p += 32;
2652 	}
2653 	return NULL;
2654 }
2655 
2656 static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
2657 			      u32 img_offset, u32 img_size, int hdr_size,
2658 			      u16 img_optype, bool *crc_match)
2659 {
2660 	u32 crc_offset;
2661 	int status;
2662 	u8 crc[4];
2663 
2664 	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
2665 				      img_size - 4);
2666 	if (status)
2667 		return status;
2668 
2669 	crc_offset = hdr_size + img_offset + img_size - 4;
2670 
2671 	/* Skip flashing, if crc of flashed region matches */
2672 	if (!memcmp(crc, p + crc_offset, 4))
2673 		*crc_match = true;
2674 	else
2675 		*crc_match = false;
2676 
2677 	return status;
2678 }
2679 
2680 static int be_flash(struct be_adapter *adapter, const u8 *img,
2681 		    struct be_dma_mem *flash_cmd, int optype, int img_size,
2682 		    u32 img_offset)
2683 {
2684 	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
2685 	struct be_cmd_write_flashrom *req = flash_cmd->va;
2686 	int status;
2687 
2688 	while (total_bytes) {
2689 		num_bytes = min_t(u32, 32 * 1024, total_bytes);
2690 
2691 		total_bytes -= num_bytes;
2692 
2693 		if (!total_bytes) {
2694 			if (optype == OPTYPE_PHY_FW)
2695 				flash_op = FLASHROM_OPER_PHY_FLASH;
2696 			else
2697 				flash_op = FLASHROM_OPER_FLASH;
2698 		} else {
2699 			if (optype == OPTYPE_PHY_FW)
2700 				flash_op = FLASHROM_OPER_PHY_SAVE;
2701 			else
2702 				flash_op = FLASHROM_OPER_SAVE;
2703 		}
2704 
2705 		memcpy(req->data_buf, img, num_bytes);
2706 		img += num_bytes;
2707 		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
2708 					       flash_op, img_offset +
2709 					       bytes_sent, num_bytes);
2710 		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
2711 		    optype == OPTYPE_PHY_FW)
2712 			break;
2713 		else if (status)
2714 			return status;
2715 
2716 		bytes_sent += num_bytes;
2717 	}
2718 	return 0;
2719 }
2720 
2721 /* For BE2, BE3 and BE3-R */
2722 static int be_flash_BEx(struct be_adapter *adapter,
2723 			const struct firmware *fw,
2724 			struct be_dma_mem *flash_cmd, int num_of_images)
2725 {
2726 	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2727 	struct device *dev = &adapter->pdev->dev;
2728 	struct flash_section_info *fsec = NULL;
2729 	int status, i, filehdr_size, num_comp;
2730 	const struct flash_comp *pflashcomp;
2731 	bool crc_match;
2732 	const u8 *p;
2733 
2734 	struct flash_comp gen3_flash_types[] = {
2735 		{ BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2736 			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2737 		{ BE3_REDBOOT_START, OPTYPE_REDBOOT,
2738 			BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2739 		{ BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
2740 			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2741 		{ BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2742 			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2743 		{ BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2744 			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2745 		{ BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2746 			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2747 		{ BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2748 			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2749 		{ BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2750 			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
2751 		{ BE3_NCSI_START, OPTYPE_NCSI_FW,
2752 			BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
2753 		{ BE3_PHY_FW_START, OPTYPE_PHY_FW,
2754 			BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
2755 	};
2756 
2757 	struct flash_comp gen2_flash_types[] = {
2758 		{ BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
2759 			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
2760 		{ BE2_REDBOOT_START, OPTYPE_REDBOOT,
2761 			BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
2762 		{ BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
2763 			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
2764 		{ BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
2765 			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
2766 		{ BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
2767 			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
2768 		{ BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
2769 			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
2770 		{ BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
2771 			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
2772 		{ BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
2773 			 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
2774 	};
2775 
2776 	if (BE3_chip(adapter)) {
2777 		pflashcomp = gen3_flash_types;
2778 		filehdr_size = sizeof(struct flash_file_hdr_g3);
2779 		num_comp = ARRAY_SIZE(gen3_flash_types);
2780 	} else {
2781 		pflashcomp = gen2_flash_types;
2782 		filehdr_size = sizeof(struct flash_file_hdr_g2);
2783 		num_comp = ARRAY_SIZE(gen2_flash_types);
2784 		img_hdrs_size = 0;
2785 	}
2786 
2787 	/* Get flash section info*/
2788 	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2789 	if (!fsec) {
2790 		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2791 		return -1;
2792 	}
2793 	for (i = 0; i < num_comp; i++) {
2794 		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2795 			continue;
2796 
2797 		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2798 		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2799 			continue;
2800 
2801 		if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
2802 		    !phy_flashing_required(adapter))
2803 			continue;
2804 
2805 		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
2806 			status = be_check_flash_crc(adapter, fw->data,
2807 						    pflashcomp[i].offset,
2808 						    pflashcomp[i].size,
2809 						    filehdr_size +
2810 						    img_hdrs_size,
2811 						    OPTYPE_REDBOOT, &crc_match);
2812 			if (status) {
2813 				dev_err(dev,
2814 					"Could not get CRC for 0x%x region\n",
2815 					pflashcomp[i].optype);
2816 				continue;
2817 			}
2818 
2819 			if (crc_match)
2820 				continue;
2821 		}
2822 
2823 		p = fw->data + filehdr_size + pflashcomp[i].offset +
2824 			img_hdrs_size;
2825 		if (p + pflashcomp[i].size > fw->data + fw->size)
2826 			return -1;
2827 
2828 		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
2829 				  pflashcomp[i].size, 0);
2830 		if (status) {
2831 			dev_err(dev, "Flashing section type 0x%x failed\n",
2832 				pflashcomp[i].img_type);
2833 			return status;
2834 		}
2835 	}
2836 	return 0;
2837 }
2838 
2839 static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
2840 {
2841 	u32 img_type = le32_to_cpu(fsec_entry.type);
2842 	u16 img_optype = le16_to_cpu(fsec_entry.optype);
2843 
2844 	if (img_optype != 0xFFFF)
2845 		return img_optype;
2846 
2847 	switch (img_type) {
2848 	case IMAGE_FIRMWARE_ISCSI:
2849 		img_optype = OPTYPE_ISCSI_ACTIVE;
2850 		break;
2851 	case IMAGE_BOOT_CODE:
2852 		img_optype = OPTYPE_REDBOOT;
2853 		break;
2854 	case IMAGE_OPTION_ROM_ISCSI:
2855 		img_optype = OPTYPE_BIOS;
2856 		break;
2857 	case IMAGE_OPTION_ROM_PXE:
2858 		img_optype = OPTYPE_PXE_BIOS;
2859 		break;
2860 	case IMAGE_OPTION_ROM_FCOE:
2861 		img_optype = OPTYPE_FCOE_BIOS;
2862 		break;
2863 	case IMAGE_FIRMWARE_BACKUP_ISCSI:
2864 		img_optype = OPTYPE_ISCSI_BACKUP;
2865 		break;
2866 	case IMAGE_NCSI:
2867 		img_optype = OPTYPE_NCSI_FW;
2868 		break;
2869 	case IMAGE_FLASHISM_JUMPVECTOR:
2870 		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
2871 		break;
2872 	case IMAGE_FIRMWARE_PHY:
2873 		img_optype = OPTYPE_SH_PHY_FW;
2874 		break;
2875 	case IMAGE_REDBOOT_DIR:
2876 		img_optype = OPTYPE_REDBOOT_DIR;
2877 		break;
2878 	case IMAGE_REDBOOT_CONFIG:
2879 		img_optype = OPTYPE_REDBOOT_CONFIG;
2880 		break;
2881 	case IMAGE_UFI_DIR:
2882 		img_optype = OPTYPE_UFI_DIR;
2883 		break;
2884 	default:
2885 		break;
2886 	}
2887 
2888 	return img_optype;
2889 }
2890 
2891 static int be_flash_skyhawk(struct be_adapter *adapter,
2892 			    const struct firmware *fw,
2893 			    struct be_dma_mem *flash_cmd, int num_of_images)
2894 {
2895 	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
2896 	bool crc_match, old_fw_img, flash_offset_support = true;
2897 	struct device *dev = &adapter->pdev->dev;
2898 	struct flash_section_info *fsec = NULL;
2899 	u32 img_offset, img_size, img_type;
2900 	u16 img_optype, flash_optype;
2901 	int status, i, filehdr_size;
2902 	const u8 *p;
2903 
2904 	filehdr_size = sizeof(struct flash_file_hdr_g3);
2905 	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2906 	if (!fsec) {
2907 		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
2908 		return -EINVAL;
2909 	}
2910 
2911 retry_flash:
2912 	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
2913 		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
2914 		img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
2915 		img_type   = le32_to_cpu(fsec->fsec_entry[i].type);
2916 		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
2917 		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
2918 
2919 		if (img_optype == 0xFFFF)
2920 			continue;
2921 
2922 		if (flash_offset_support)
2923 			flash_optype = OPTYPE_OFFSET_SPECIFIED;
2924 		else
2925 			flash_optype = img_optype;
2926 
2927 		/* Don't bother verifying CRC if an old FW image is being
2928 		 * flashed
2929 		 */
2930 		if (old_fw_img)
2931 			goto flash;
2932 
2933 		status = be_check_flash_crc(adapter, fw->data, img_offset,
2934 					    img_size, filehdr_size +
2935 					    img_hdrs_size, flash_optype,
2936 					    &crc_match);
2937 		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
2938 		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
2939 			/* The current FW image on the card does not support
2940 			 * OFFSET based flashing. Retry using older mechanism
2941 			 * of OPTYPE based flashing
2942 			 */
2943 			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
2944 				flash_offset_support = false;
2945 				goto retry_flash;
2946 			}
2947 
2948 			/* The current FW image on the card does not recognize
2949 			 * the new FLASH op_type. The FW download is partially
2950 			 * complete. Reboot the server now to enable FW image
2951 			 * to recognize the new FLASH op_type. To complete the
2952 			 * remaining process, download the same FW again after
2953 			 * the reboot.
2954 			 */
2955 			dev_err(dev, "Flash incomplete. Reset the server\n");
2956 			dev_err(dev, "Download FW image again after reset\n");
2957 			return -EAGAIN;
2958 		} else if (status) {
2959 			dev_err(dev, "Could not get CRC for 0x%x region\n",
2960 				img_optype);
2961 			return -EFAULT;
2962 		}
2963 
2964 		if (crc_match)
2965 			continue;
2966 
2967 flash:
2968 		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
2969 		if (p + img_size > fw->data + fw->size)
2970 			return -1;
2971 
2972 		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
2973 				  img_offset);
2974 
2975 		/* The current FW image on the card does not support OFFSET
2976 		 * based flashing. Retry using older mechanism of OPTYPE based
2977 		 * flashing
2978 		 */
2979 		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
2980 		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
2981 			flash_offset_support = false;
2982 			goto retry_flash;
2983 		}
2984 
2985 		/* For old FW images ignore ILLEGAL_FIELD error or errors on
2986 		 * UFI_DIR region
2987 		 */
2988 		if (old_fw_img &&
2989 		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
2990 		     (img_optype == OPTYPE_UFI_DIR &&
2991 		      base_status(status) == MCC_STATUS_FAILED))) {
2992 			continue;
2993 		} else if (status) {
2994 			dev_err(dev, "Flashing section type 0x%x failed\n",
2995 				img_type);
2996 
2997 			switch (addl_status(status)) {
2998 			case MCC_ADDL_STATUS_MISSING_SIGNATURE:
2999 				dev_err(dev,
3000 					"Digital signature missing in FW\n");
3001 				return -EINVAL;
3002 			case MCC_ADDL_STATUS_INVALID_SIGNATURE:
3003 				dev_err(dev,
3004 					"Invalid digital signature in FW\n");
3005 				return -EINVAL;
3006 			default:
3007 				return -EFAULT;
3008 			}
3009 		}
3010 	}
3011 	return 0;
3012 }
3013 
3014 int lancer_fw_download(struct be_adapter *adapter,
3015 		       const struct firmware *fw)
3016 {
3017 	struct device *dev = &adapter->pdev->dev;
3018 	struct be_dma_mem flash_cmd;
3019 	const u8 *data_ptr = NULL;
3020 	u8 *dest_image_ptr = NULL;
3021 	size_t image_size = 0;
3022 	u32 chunk_size = 0;
3023 	u32 data_written = 0;
3024 	u32 offset = 0;
3025 	int status = 0;
3026 	u8 add_status = 0;
3027 	u8 change_status;
3028 
3029 	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3030 		dev_err(dev, "FW image size should be multiple of 4\n");
3031 		return -EINVAL;
3032 	}
3033 
3034 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3035 				+ LANCER_FW_DOWNLOAD_CHUNK;
3036 	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
3037 					   &flash_cmd.dma, GFP_KERNEL);
3038 	if (!flash_cmd.va)
3039 		return -ENOMEM;
3040 
3041 	dest_image_ptr = flash_cmd.va +
3042 				sizeof(struct lancer_cmd_req_write_object);
3043 	image_size = fw->size;
3044 	data_ptr = fw->data;
3045 
3046 	while (image_size) {
3047 		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3048 
3049 		/* Copy the image chunk content. */
3050 		memcpy(dest_image_ptr, data_ptr, chunk_size);
3051 
3052 		status = lancer_cmd_write_object(adapter, &flash_cmd,
3053 						 chunk_size, offset,
3054 						 LANCER_FW_DOWNLOAD_LOCATION,
3055 						 &data_written, &change_status,
3056 						 &add_status);
3057 		if (status)
3058 			break;
3059 
3060 		offset += data_written;
3061 		data_ptr += data_written;
3062 		image_size -= data_written;
3063 	}
3064 
3065 	if (!status) {
3066 		/* Commit the FW written */
3067 		status = lancer_cmd_write_object(adapter, &flash_cmd,
3068 						 0, offset,
3069 						 LANCER_FW_DOWNLOAD_LOCATION,
3070 						 &data_written, &change_status,
3071 						 &add_status);
3072 	}
3073 
3074 	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3075 	if (status) {
3076 		dev_err(dev, "Firmware load error\n");
3077 		return be_cmd_status(status);
3078 	}
3079 
3080 	dev_info(dev, "Firmware flashed successfully\n");
3081 
3082 	if (change_status == LANCER_FW_RESET_NEEDED) {
3083 		dev_info(dev, "Resetting adapter to activate new FW\n");
3084 		status = lancer_physdev_ctrl(adapter,
3085 					     PHYSDEV_CONTROL_FW_RESET_MASK);
3086 		if (status) {
3087 			dev_err(dev, "Adapter busy, could not reset FW\n");
3088 			dev_err(dev, "Reboot server to activate new FW\n");
3089 		}
3090 	} else if (change_status != LANCER_NO_RESET_NEEDED) {
3091 		dev_info(dev, "Reboot server to activate new FW\n");
3092 	}
3093 
3094 	return 0;
3095 }
3096 
3097 /* Check if the flash image file is compatible with the adapter that
3098  * is being flashed.
3099  */
3100 static bool be_check_ufi_compatibility(struct be_adapter *adapter,
3101 				       struct flash_file_hdr_g3 *fhdr)
3102 {
3103 	if (!fhdr) {
3104 		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
3105 		return false;
3106 	}
3107 
3108 	/* First letter of the build version is used to identify
3109 	 * which chip this image file is meant for.
3110 	 */
3111 	switch (fhdr->build[0]) {
3112 	case BLD_STR_UFI_TYPE_SH:
3113 		if (!skyhawk_chip(adapter))
3114 			return false;
3115 		break;
3116 	case BLD_STR_UFI_TYPE_BE3:
3117 		if (!BE3_chip(adapter))
3118 			return false;
3119 		break;
3120 	case BLD_STR_UFI_TYPE_BE2:
3121 		if (!BE2_chip(adapter))
3122 			return false;
3123 		break;
3124 	default:
3125 		return false;
3126 	}
3127 
3128 	/* In BE3 FW images the "asic_type_rev" field doesn't track the
3129 	 * asic_rev of the chips it is compatible with.
3130 	 * When asic_type_rev is 0 the image is compatible only with
3131 	 * pre-BE3-R chips (asic_rev < 0x10)
3132 	 */
3133 	if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
3134 		return adapter->asic_rev < 0x10;
3135 	else
3136 		return (fhdr->asic_type_rev >= adapter->asic_rev);
3137 }
3138 
3139 int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3140 {
3141 	struct device *dev = &adapter->pdev->dev;
3142 	struct flash_file_hdr_g3 *fhdr3;
3143 	struct image_hdr *img_hdr_ptr;
3144 	int status = 0, i, num_imgs;
3145 	struct be_dma_mem flash_cmd;
3146 
3147 	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3148 	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
3149 		dev_err(dev, "Flash image is not compatible with adapter\n");
3150 		return -EINVAL;
3151 	}
3152 
3153 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3154 	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
3155 					   GFP_KERNEL);
3156 	if (!flash_cmd.va)
3157 		return -ENOMEM;
3158 
3159 	num_imgs = le32_to_cpu(fhdr3->num_imgs);
3160 	for (i = 0; i < num_imgs; i++) {
3161 		img_hdr_ptr = (struct image_hdr *)(fw->data +
3162 				(sizeof(struct flash_file_hdr_g3) +
3163 				 i * sizeof(struct image_hdr)));
3164 		if (!BE2_chip(adapter) &&
3165 		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
3166 			continue;
3167 
3168 		if (skyhawk_chip(adapter))
3169 			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
3170 						  num_imgs);
3171 		else
3172 			status = be_flash_BEx(adapter, fw, &flash_cmd,
3173 					      num_imgs);
3174 	}
3175 
3176 	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
3177 	if (!status)
3178 		dev_info(dev, "Firmware flashed successfully\n");
3179 
3180 	return status;
3181 }
3182 
3183 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
3184 			    struct be_dma_mem *nonemb_cmd)
3185 {
3186 	struct be_mcc_wrb *wrb;
3187 	struct be_cmd_req_acpi_wol_magic_config *req;
3188 	int status;
3189 
3190 	spin_lock_bh(&adapter->mcc_lock);
3191 
3192 	wrb = wrb_from_mccq(adapter);
3193 	if (!wrb) {
3194 		status = -EBUSY;
3195 		goto err;
3196 	}
3197 	req = nonemb_cmd->va;
3198 
3199 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3200 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
3201 			       wrb, nonemb_cmd);
3202 	memcpy(req->magic_mac, mac, ETH_ALEN);
3203 
3204 	status = be_mcc_notify_wait(adapter);
3205 
3206 err:
3207 	spin_unlock_bh(&adapter->mcc_lock);
3208 	return status;
3209 }
3210 
3211 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
3212 			u8 loopback_type, u8 enable)
3213 {
3214 	struct be_mcc_wrb *wrb;
3215 	struct be_cmd_req_set_lmode *req;
3216 	int status;
3217 
3218 	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
3219 			    CMD_SUBSYSTEM_LOWLEVEL))
3220 		return -EPERM;
3221 
3222 	spin_lock_bh(&adapter->mcc_lock);
3223 
3224 	wrb = wrb_from_mccq(adapter);
3225 	if (!wrb) {
3226 		status = -EBUSY;
3227 		goto err_unlock;
3228 	}
3229 
3230 	req = embedded_payload(wrb);
3231 
3232 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3233 			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
3234 			       wrb, NULL);
3235 
3236 	req->src_port = port_num;
3237 	req->dest_port = port_num;
3238 	req->loopback_type = loopback_type;
3239 	req->loopback_state = enable;
3240 
3241 	status = be_mcc_notify(adapter);
3242 	if (status)
3243 		goto err_unlock;
3244 
3245 	spin_unlock_bh(&adapter->mcc_lock);
3246 
3247 	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
3248 					 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
3249 		status = -ETIMEDOUT;
3250 
3251 	return status;
3252 
3253 err_unlock:
3254 	spin_unlock_bh(&adapter->mcc_lock);
3255 	return status;
3256 }
3257 
3258 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
3259 			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
3260 			 u64 pattern)
3261 {
3262 	struct be_mcc_wrb *wrb;
3263 	struct be_cmd_req_loopback_test *req;
3264 	struct be_cmd_resp_loopback_test *resp;
3265 	int status;
3266 
3267 	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
3268 			    CMD_SUBSYSTEM_LOWLEVEL))
3269 		return -EPERM;
3270 
3271 	spin_lock_bh(&adapter->mcc_lock);
3272 
3273 	wrb = wrb_from_mccq(adapter);
3274 	if (!wrb) {
3275 		status = -EBUSY;
3276 		goto err;
3277 	}
3278 
3279 	req = embedded_payload(wrb);
3280 
3281 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3282 			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
3283 			       NULL);
3284 
3285 	req->hdr.timeout = cpu_to_le32(15);
3286 	req->pattern = cpu_to_le64(pattern);
3287 	req->src_port = cpu_to_le32(port_num);
3288 	req->dest_port = cpu_to_le32(port_num);
3289 	req->pkt_size = cpu_to_le32(pkt_size);
3290 	req->num_pkts = cpu_to_le32(num_pkts);
3291 	req->loopback_type = cpu_to_le32(loopback_type);
3292 
3293 	status = be_mcc_notify(adapter);
3294 	if (status)
3295 		goto err;
3296 
3297 	spin_unlock_bh(&adapter->mcc_lock);
3298 
3299 	wait_for_completion(&adapter->et_cmd_compl);
3300 	resp = embedded_payload(wrb);
3301 	status = le32_to_cpu(resp->status);
3302 
3303 	return status;
3304 err:
3305 	spin_unlock_bh(&adapter->mcc_lock);
3306 	return status;
3307 }
3308 
3309 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
3310 			u32 byte_cnt, struct be_dma_mem *cmd)
3311 {
3312 	struct be_mcc_wrb *wrb;
3313 	struct be_cmd_req_ddrdma_test *req;
3314 	int status;
3315 	int i, j = 0;
3316 
3317 	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
3318 			    CMD_SUBSYSTEM_LOWLEVEL))
3319 		return -EPERM;
3320 
3321 	spin_lock_bh(&adapter->mcc_lock);
3322 
3323 	wrb = wrb_from_mccq(adapter);
3324 	if (!wrb) {
3325 		status = -EBUSY;
3326 		goto err;
3327 	}
3328 	req = cmd->va;
3329 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
3330 			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
3331 			       cmd);
3332 
3333 	req->pattern = cpu_to_le64(pattern);
3334 	req->byte_count = cpu_to_le32(byte_cnt);
3335 	for (i = 0; i < byte_cnt; i++) {
3336 		req->snd_buff[i] = (u8)(pattern >> (j*8));
3337 		j++;
3338 		if (j > 7)
3339 			j = 0;
3340 	}
3341 
3342 	status = be_mcc_notify_wait(adapter);
3343 
3344 	if (!status) {
3345 		struct be_cmd_resp_ddrdma_test *resp;
3346 
3347 		resp = cmd->va;
3348 		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
3349 		    resp->snd_err) {
3350 			status = -1;
3351 		}
3352 	}
3353 
3354 err:
3355 	spin_unlock_bh(&adapter->mcc_lock);
3356 	return status;
3357 }
3358 
3359 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
3360 			    struct be_dma_mem *nonemb_cmd)
3361 {
3362 	struct be_mcc_wrb *wrb;
3363 	struct be_cmd_req_seeprom_read *req;
3364 	int status;
3365 
3366 	spin_lock_bh(&adapter->mcc_lock);
3367 
3368 	wrb = wrb_from_mccq(adapter);
3369 	if (!wrb) {
3370 		status = -EBUSY;
3371 		goto err;
3372 	}
3373 	req = nonemb_cmd->va;
3374 
3375 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3376 			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
3377 			       nonemb_cmd);
3378 
3379 	status = be_mcc_notify_wait(adapter);
3380 
3381 err:
3382 	spin_unlock_bh(&adapter->mcc_lock);
3383 	return status;
3384 }
3385 
3386 int be_cmd_get_phy_info(struct be_adapter *adapter)
3387 {
3388 	struct be_mcc_wrb *wrb;
3389 	struct be_cmd_req_get_phy_info *req;
3390 	struct be_dma_mem cmd;
3391 	int status;
3392 
3393 	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
3394 			    CMD_SUBSYSTEM_COMMON))
3395 		return -EPERM;
3396 
3397 	spin_lock_bh(&adapter->mcc_lock);
3398 
3399 	wrb = wrb_from_mccq(adapter);
3400 	if (!wrb) {
3401 		status = -EBUSY;
3402 		goto err;
3403 	}
3404 	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
3405 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3406 				     GFP_ATOMIC);
3407 	if (!cmd.va) {
3408 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3409 		status = -ENOMEM;
3410 		goto err;
3411 	}
3412 
3413 	req = cmd.va;
3414 
3415 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3416 			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
3417 			       wrb, &cmd);
3418 
3419 	status = be_mcc_notify_wait(adapter);
3420 	if (!status) {
3421 		struct be_phy_info *resp_phy_info =
3422 				cmd.va + sizeof(struct be_cmd_req_hdr);
3423 
3424 		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
3425 		adapter->phy.interface_type =
3426 			le16_to_cpu(resp_phy_info->interface_type);
3427 		adapter->phy.auto_speeds_supported =
3428 			le16_to_cpu(resp_phy_info->auto_speeds_supported);
3429 		adapter->phy.fixed_speeds_supported =
3430 			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
3431 		adapter->phy.misc_params =
3432 			le32_to_cpu(resp_phy_info->misc_params);
3433 
3434 		if (BE2_chip(adapter)) {
3435 			adapter->phy.fixed_speeds_supported =
3436 				BE_SUPPORTED_SPEED_10GBPS |
3437 				BE_SUPPORTED_SPEED_1GBPS;
3438 		}
3439 	}
3440 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3441 err:
3442 	spin_unlock_bh(&adapter->mcc_lock);
3443 	return status;
3444 }
3445 
3446 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
3447 {
3448 	struct be_mcc_wrb *wrb;
3449 	struct be_cmd_req_set_qos *req;
3450 	int status;
3451 
3452 	spin_lock_bh(&adapter->mcc_lock);
3453 
3454 	wrb = wrb_from_mccq(adapter);
3455 	if (!wrb) {
3456 		status = -EBUSY;
3457 		goto err;
3458 	}
3459 
3460 	req = embedded_payload(wrb);
3461 
3462 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3463 			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
3464 
3465 	req->hdr.domain = domain;
3466 	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
3467 	req->max_bps_nic = cpu_to_le32(bps);
3468 
3469 	status = be_mcc_notify_wait(adapter);
3470 
3471 err:
3472 	spin_unlock_bh(&adapter->mcc_lock);
3473 	return status;
3474 }
3475 
3476 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
3477 {
3478 	struct be_mcc_wrb *wrb;
3479 	struct be_cmd_req_cntl_attribs *req;
3480 	struct be_cmd_resp_cntl_attribs *resp;
3481 	int status, i;
3482 	int payload_len = max(sizeof(*req), sizeof(*resp));
3483 	struct mgmt_controller_attrib *attribs;
3484 	struct be_dma_mem attribs_cmd;
3485 	u32 *serial_num;
3486 
3487 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3488 		return -1;
3489 
3490 	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
3491 	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
3492 	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3493 					     attribs_cmd.size,
3494 					     &attribs_cmd.dma, GFP_ATOMIC);
3495 	if (!attribs_cmd.va) {
3496 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
3497 		status = -ENOMEM;
3498 		goto err;
3499 	}
3500 
3501 	wrb = wrb_from_mbox(adapter);
3502 	if (!wrb) {
3503 		status = -EBUSY;
3504 		goto err;
3505 	}
3506 	req = attribs_cmd.va;
3507 
3508 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3509 			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
3510 			       wrb, &attribs_cmd);
3511 
3512 	status = be_mbox_notify_wait(adapter);
3513 	if (!status) {
3514 		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
3515 		adapter->hba_port_num = attribs->hba_attribs.phy_port;
3516 		serial_num = attribs->hba_attribs.controller_serial_number;
3517 		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
3518 			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
3519 				(BIT_MASK(16) - 1);
3520 	}
3521 
3522 err:
3523 	mutex_unlock(&adapter->mbox_lock);
3524 	if (attribs_cmd.va)
3525 		dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
3526 				  attribs_cmd.va, attribs_cmd.dma);
3527 	return status;
3528 }
3529 
3530 /* Uses mbox */
3531 int be_cmd_req_native_mode(struct be_adapter *adapter)
3532 {
3533 	struct be_mcc_wrb *wrb;
3534 	struct be_cmd_req_set_func_cap *req;
3535 	int status;
3536 
3537 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3538 		return -1;
3539 
3540 	wrb = wrb_from_mbox(adapter);
3541 	if (!wrb) {
3542 		status = -EBUSY;
3543 		goto err;
3544 	}
3545 
3546 	req = embedded_payload(wrb);
3547 
3548 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3549 			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
3550 			       sizeof(*req), wrb, NULL);
3551 
3552 	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
3553 				CAPABILITY_BE3_NATIVE_ERX_API);
3554 	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
3555 
3556 	status = be_mbox_notify_wait(adapter);
3557 	if (!status) {
3558 		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
3559 
3560 		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
3561 					CAPABILITY_BE3_NATIVE_ERX_API;
3562 		if (!adapter->be3_native)
3563 			dev_warn(&adapter->pdev->dev,
3564 				 "adapter not in advanced mode\n");
3565 	}
3566 err:
3567 	mutex_unlock(&adapter->mbox_lock);
3568 	return status;
3569 }
3570 
3571 /* Get privilege(s) for a function */
3572 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
3573 			     u32 domain)
3574 {
3575 	struct be_mcc_wrb *wrb;
3576 	struct be_cmd_req_get_fn_privileges *req;
3577 	int status;
3578 
3579 	spin_lock_bh(&adapter->mcc_lock);
3580 
3581 	wrb = wrb_from_mccq(adapter);
3582 	if (!wrb) {
3583 		status = -EBUSY;
3584 		goto err;
3585 	}
3586 
3587 	req = embedded_payload(wrb);
3588 
3589 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3590 			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
3591 			       wrb, NULL);
3592 
3593 	req->hdr.domain = domain;
3594 
3595 	status = be_mcc_notify_wait(adapter);
3596 	if (!status) {
3597 		struct be_cmd_resp_get_fn_privileges *resp =
3598 						embedded_payload(wrb);
3599 
3600 		*privilege = le32_to_cpu(resp->privilege_mask);
3601 
3602 		/* In UMC mode FW does not return right privileges.
3603 		 * Override with correct privilege equivalent to PF.
3604 		 */
3605 		if (BEx_chip(adapter) && be_is_mc(adapter) &&
3606 		    be_physfn(adapter))
3607 			*privilege = MAX_PRIVILEGES;
3608 	}
3609 
3610 err:
3611 	spin_unlock_bh(&adapter->mcc_lock);
3612 	return status;
3613 }
3614 
3615 /* Set privilege(s) for a function */
3616 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
3617 			     u32 domain)
3618 {
3619 	struct be_mcc_wrb *wrb;
3620 	struct be_cmd_req_set_fn_privileges *req;
3621 	int status;
3622 
3623 	spin_lock_bh(&adapter->mcc_lock);
3624 
3625 	wrb = wrb_from_mccq(adapter);
3626 	if (!wrb) {
3627 		status = -EBUSY;
3628 		goto err;
3629 	}
3630 
3631 	req = embedded_payload(wrb);
3632 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3633 			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
3634 			       wrb, NULL);
3635 	req->hdr.domain = domain;
3636 	if (lancer_chip(adapter))
3637 		req->privileges_lancer = cpu_to_le32(privileges);
3638 	else
3639 		req->privileges = cpu_to_le32(privileges);
3640 
3641 	status = be_mcc_notify_wait(adapter);
3642 err:
3643 	spin_unlock_bh(&adapter->mcc_lock);
3644 	return status;
3645 }
3646 
3647 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
3648  * pmac_id_valid: false => pmac_id or MAC address is requested.
3649  *		  If pmac_id is returned, pmac_id_valid is returned as true
3650  */
3651 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
3652 			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
3653 			     u8 domain)
3654 {
3655 	struct be_mcc_wrb *wrb;
3656 	struct be_cmd_req_get_mac_list *req;
3657 	int status;
3658 	int mac_count;
3659 	struct be_dma_mem get_mac_list_cmd;
3660 	int i;
3661 
3662 	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
3663 	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
3664 	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
3665 						  get_mac_list_cmd.size,
3666 						  &get_mac_list_cmd.dma,
3667 						  GFP_ATOMIC);
3668 
3669 	if (!get_mac_list_cmd.va) {
3670 		dev_err(&adapter->pdev->dev,
3671 			"Memory allocation failure during GET_MAC_LIST\n");
3672 		return -ENOMEM;
3673 	}
3674 
3675 	spin_lock_bh(&adapter->mcc_lock);
3676 
3677 	wrb = wrb_from_mccq(adapter);
3678 	if (!wrb) {
3679 		status = -EBUSY;
3680 		goto out;
3681 	}
3682 
3683 	req = get_mac_list_cmd.va;
3684 
3685 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3686 			       OPCODE_COMMON_GET_MAC_LIST,
3687 			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
3688 	req->hdr.domain = domain;
3689 	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
3690 	if (*pmac_id_valid) {
3691 		req->mac_id = cpu_to_le32(*pmac_id);
3692 		req->iface_id = cpu_to_le16(if_handle);
3693 		req->perm_override = 0;
3694 	} else {
3695 		req->perm_override = 1;
3696 	}
3697 
3698 	status = be_mcc_notify_wait(adapter);
3699 	if (!status) {
3700 		struct be_cmd_resp_get_mac_list *resp =
3701 						get_mac_list_cmd.va;
3702 
3703 		if (*pmac_id_valid) {
3704 			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
3705 			       ETH_ALEN);
3706 			goto out;
3707 		}
3708 
3709 		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
3710 		/* Mac list returned could contain one or more active mac_ids
3711 		 * or one or more true or pseudo permanent mac addresses.
3712 		 * If an active mac_id is present, return first active mac_id
3713 		 * found.
3714 		 */
3715 		for (i = 0; i < mac_count; i++) {
3716 			struct get_list_macaddr *mac_entry;
3717 			u16 mac_addr_size;
3718 			u32 mac_id;
3719 
3720 			mac_entry = &resp->macaddr_list[i];
3721 			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
3722 			/* mac_id is a 32 bit value and mac_addr size
3723 			 * is 6 bytes
3724 			 */
3725 			if (mac_addr_size == sizeof(u32)) {
3726 				*pmac_id_valid = true;
3727 				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
3728 				*pmac_id = le32_to_cpu(mac_id);
3729 				goto out;
3730 			}
3731 		}
3732 		/* If no active mac_id found, return first mac addr */
3733 		*pmac_id_valid = false;
3734 		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
3735 		       ETH_ALEN);
3736 	}
3737 
3738 out:
3739 	spin_unlock_bh(&adapter->mcc_lock);
3740 	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
3741 			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
3742 	return status;
3743 }
3744 
3745 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
3746 			  u8 *mac, u32 if_handle, bool active, u32 domain)
3747 {
3748 	if (!active)
3749 		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
3750 					 if_handle, domain);
3751 	if (BEx_chip(adapter))
3752 		return be_cmd_mac_addr_query(adapter, mac, false,
3753 					     if_handle, curr_pmac_id);
3754 	else
3755 		/* Fetch the MAC address using pmac_id */
3756 		return be_cmd_get_mac_from_list(adapter, mac, &active,
3757 						&curr_pmac_id,
3758 						if_handle, domain);
3759 }
3760 
3761 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
3762 {
3763 	int status;
3764 	bool pmac_valid = false;
3765 
3766 	eth_zero_addr(mac);
3767 
3768 	if (BEx_chip(adapter)) {
3769 		if (be_physfn(adapter))
3770 			status = be_cmd_mac_addr_query(adapter, mac, true, 0,
3771 						       0);
3772 		else
3773 			status = be_cmd_mac_addr_query(adapter, mac, false,
3774 						       adapter->if_handle, 0);
3775 	} else {
3776 		status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
3777 						  NULL, adapter->if_handle, 0);
3778 	}
3779 
3780 	return status;
3781 }
3782 
3783 /* Uses synchronous MCCQ */
3784 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
3785 			u8 mac_count, u32 domain)
3786 {
3787 	struct be_mcc_wrb *wrb;
3788 	struct be_cmd_req_set_mac_list *req;
3789 	int status;
3790 	struct be_dma_mem cmd;
3791 
3792 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3793 	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
3794 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3795 				     GFP_KERNEL);
3796 	if (!cmd.va)
3797 		return -ENOMEM;
3798 
3799 	spin_lock_bh(&adapter->mcc_lock);
3800 
3801 	wrb = wrb_from_mccq(adapter);
3802 	if (!wrb) {
3803 		status = -EBUSY;
3804 		goto err;
3805 	}
3806 
3807 	req = cmd.va;
3808 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3809 			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
3810 			       wrb, &cmd);
3811 
3812 	req->hdr.domain = domain;
3813 	req->mac_count = mac_count;
3814 	if (mac_count)
3815 		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
3816 
3817 	status = be_mcc_notify_wait(adapter);
3818 
3819 err:
3820 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
3821 	spin_unlock_bh(&adapter->mcc_lock);
3822 	return status;
3823 }
3824 
3825 /* Wrapper to delete any active MACs and provision the new mac.
3826  * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
3827  * current list are active.
3828  */
3829 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
3830 {
3831 	bool active_mac = false;
3832 	u8 old_mac[ETH_ALEN];
3833 	u32 pmac_id;
3834 	int status;
3835 
3836 	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
3837 					  &pmac_id, if_id, dom);
3838 
3839 	if (!status && active_mac)
3840 		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
3841 
3842 	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
3843 }
3844 
3845 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
3846 			  u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
3847 {
3848 	struct be_mcc_wrb *wrb;
3849 	struct be_cmd_req_set_hsw_config *req;
3850 	void *ctxt;
3851 	int status;
3852 
3853 	spin_lock_bh(&adapter->mcc_lock);
3854 
3855 	wrb = wrb_from_mccq(adapter);
3856 	if (!wrb) {
3857 		status = -EBUSY;
3858 		goto err;
3859 	}
3860 
3861 	req = embedded_payload(wrb);
3862 	ctxt = &req->context;
3863 
3864 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3865 			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
3866 			       NULL);
3867 
3868 	req->hdr.domain = domain;
3869 	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
3870 	if (pvid) {
3871 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
3872 		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
3873 	}
3874 	if (!BEx_chip(adapter) && hsw_mode) {
3875 		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
3876 			      ctxt, adapter->hba_port_num);
3877 		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
3878 		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
3879 			      ctxt, hsw_mode);
3880 	}
3881 
3882 	/* Enable/disable both mac and vlan spoof checking */
3883 	if (!BEx_chip(adapter) && spoofchk) {
3884 		AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
3885 			      ctxt, spoofchk);
3886 		AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
3887 			      ctxt, spoofchk);
3888 	}
3889 
3890 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3891 	status = be_mcc_notify_wait(adapter);
3892 
3893 err:
3894 	spin_unlock_bh(&adapter->mcc_lock);
3895 	return status;
3896 }
3897 
3898 /* Get Hyper switch config */
3899 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
3900 			  u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
3901 {
3902 	struct be_mcc_wrb *wrb;
3903 	struct be_cmd_req_get_hsw_config *req;
3904 	void *ctxt;
3905 	int status;
3906 	u16 vid;
3907 
3908 	spin_lock_bh(&adapter->mcc_lock);
3909 
3910 	wrb = wrb_from_mccq(adapter);
3911 	if (!wrb) {
3912 		status = -EBUSY;
3913 		goto err;
3914 	}
3915 
3916 	req = embedded_payload(wrb);
3917 	ctxt = &req->context;
3918 
3919 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3920 			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
3921 			       NULL);
3922 
3923 	req->hdr.domain = domain;
3924 	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3925 		      ctxt, intf_id);
3926 	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
3927 
3928 	if (!BEx_chip(adapter) && mode) {
3929 		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
3930 			      ctxt, adapter->hba_port_num);
3931 		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
3932 	}
3933 	be_dws_cpu_to_le(req->context, sizeof(req->context));
3934 
3935 	status = be_mcc_notify_wait(adapter);
3936 	if (!status) {
3937 		struct be_cmd_resp_get_hsw_config *resp =
3938 						embedded_payload(wrb);
3939 
3940 		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
3941 		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3942 				    pvid, &resp->context);
3943 		if (pvid)
3944 			*pvid = le16_to_cpu(vid);
3945 		if (mode)
3946 			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3947 					      port_fwd_type, &resp->context);
3948 		if (spoofchk)
3949 			*spoofchk =
3950 				AMAP_GET_BITS(struct amap_get_hsw_resp_context,
3951 					      spoofchk, &resp->context);
3952 	}
3953 
3954 err:
3955 	spin_unlock_bh(&adapter->mcc_lock);
3956 	return status;
3957 }
3958 
3959 static bool be_is_wol_excluded(struct be_adapter *adapter)
3960 {
3961 	struct pci_dev *pdev = adapter->pdev;
3962 
3963 	if (be_virtfn(adapter))
3964 		return true;
3965 
3966 	switch (pdev->subsystem_device) {
3967 	case OC_SUBSYS_DEVICE_ID1:
3968 	case OC_SUBSYS_DEVICE_ID2:
3969 	case OC_SUBSYS_DEVICE_ID3:
3970 	case OC_SUBSYS_DEVICE_ID4:
3971 		return true;
3972 	default:
3973 		return false;
3974 	}
3975 }
3976 
3977 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
3978 {
3979 	struct be_mcc_wrb *wrb;
3980 	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
3981 	int status = 0;
3982 	struct be_dma_mem cmd;
3983 
3984 	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3985 			    CMD_SUBSYSTEM_ETH))
3986 		return -EPERM;
3987 
3988 	if (be_is_wol_excluded(adapter))
3989 		return status;
3990 
3991 	if (mutex_lock_interruptible(&adapter->mbox_lock))
3992 		return -1;
3993 
3994 	memset(&cmd, 0, sizeof(struct be_dma_mem));
3995 	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3996 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3997 				     GFP_ATOMIC);
3998 	if (!cmd.va) {
3999 		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
4000 		status = -ENOMEM;
4001 		goto err;
4002 	}
4003 
4004 	wrb = wrb_from_mbox(adapter);
4005 	if (!wrb) {
4006 		status = -EBUSY;
4007 		goto err;
4008 	}
4009 
4010 	req = cmd.va;
4011 
4012 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
4013 			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
4014 			       sizeof(*req), wrb, &cmd);
4015 
4016 	req->hdr.version = 1;
4017 	req->query_options = BE_GET_WOL_CAP;
4018 
4019 	status = be_mbox_notify_wait(adapter);
4020 	if (!status) {
4021 		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
4022 
4023 		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
4024 
4025 		adapter->wol_cap = resp->wol_settings;
4026 		if (adapter->wol_cap & BE_WOL_CAP)
4027 			adapter->wol_en = true;
4028 	}
4029 err:
4030 	mutex_unlock(&adapter->mbox_lock);
4031 	if (cmd.va)
4032 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4033 				  cmd.dma);
4034 	return status;
4035 
4036 }
4037 
4038 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
4039 {
4040 	struct be_dma_mem extfat_cmd;
4041 	struct be_fat_conf_params *cfgs;
4042 	int status;
4043 	int i, j;
4044 
4045 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4046 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4047 	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
4048 					    extfat_cmd.size, &extfat_cmd.dma,
4049 					    GFP_ATOMIC);
4050 	if (!extfat_cmd.va)
4051 		return -ENOMEM;
4052 
4053 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4054 	if (status)
4055 		goto err;
4056 
4057 	cfgs = (struct be_fat_conf_params *)
4058 			(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
4059 	for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
4060 		u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
4061 
4062 		for (j = 0; j < num_modes; j++) {
4063 			if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
4064 				cfgs->module[i].trace_lvl[j].dbg_lvl =
4065 							cpu_to_le32(level);
4066 		}
4067 	}
4068 
4069 	status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
4070 err:
4071 	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4072 			  extfat_cmd.dma);
4073 	return status;
4074 }
4075 
4076 int be_cmd_get_fw_log_level(struct be_adapter *adapter)
4077 {
4078 	struct be_dma_mem extfat_cmd;
4079 	struct be_fat_conf_params *cfgs;
4080 	int status, j;
4081 	int level = 0;
4082 
4083 	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4084 	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4085 	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
4086 					    extfat_cmd.size, &extfat_cmd.dma,
4087 					    GFP_ATOMIC);
4088 
4089 	if (!extfat_cmd.va) {
4090 		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4091 			__func__);
4092 		goto err;
4093 	}
4094 
4095 	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4096 	if (!status) {
4097 		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4098 						sizeof(struct be_cmd_resp_hdr));
4099 
4100 		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4101 			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4102 				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4103 		}
4104 	}
4105 	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
4106 			  extfat_cmd.dma);
4107 err:
4108 	return level;
4109 }
4110 
4111 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
4112 				   struct be_dma_mem *cmd)
4113 {
4114 	struct be_mcc_wrb *wrb;
4115 	struct be_cmd_req_get_ext_fat_caps *req;
4116 	int status;
4117 
4118 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4119 		return -1;
4120 
4121 	wrb = wrb_from_mbox(adapter);
4122 	if (!wrb) {
4123 		status = -EBUSY;
4124 		goto err;
4125 	}
4126 
4127 	req = cmd->va;
4128 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4129 			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
4130 			       cmd->size, wrb, cmd);
4131 	req->parameter_type = cpu_to_le32(1);
4132 
4133 	status = be_mbox_notify_wait(adapter);
4134 err:
4135 	mutex_unlock(&adapter->mbox_lock);
4136 	return status;
4137 }
4138 
4139 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
4140 				   struct be_dma_mem *cmd,
4141 				   struct be_fat_conf_params *configs)
4142 {
4143 	struct be_mcc_wrb *wrb;
4144 	struct be_cmd_req_set_ext_fat_caps *req;
4145 	int status;
4146 
4147 	spin_lock_bh(&adapter->mcc_lock);
4148 
4149 	wrb = wrb_from_mccq(adapter);
4150 	if (!wrb) {
4151 		status = -EBUSY;
4152 		goto err;
4153 	}
4154 
4155 	req = cmd->va;
4156 	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
4157 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4158 			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
4159 			       cmd->size, wrb, cmd);
4160 
4161 	status = be_mcc_notify_wait(adapter);
4162 err:
4163 	spin_unlock_bh(&adapter->mcc_lock);
4164 	return status;
4165 }
4166 
4167 int be_cmd_query_port_name(struct be_adapter *adapter)
4168 {
4169 	struct be_cmd_req_get_port_name *req;
4170 	struct be_mcc_wrb *wrb;
4171 	int status;
4172 
4173 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4174 		return -1;
4175 
4176 	wrb = wrb_from_mbox(adapter);
4177 	req = embedded_payload(wrb);
4178 
4179 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4180 			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
4181 			       NULL);
4182 	if (!BEx_chip(adapter))
4183 		req->hdr.version = 1;
4184 
4185 	status = be_mbox_notify_wait(adapter);
4186 	if (!status) {
4187 		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
4188 
4189 		adapter->port_name = resp->port_name[adapter->hba_port_num];
4190 	} else {
4191 		adapter->port_name = adapter->hba_port_num + '0';
4192 	}
4193 
4194 	mutex_unlock(&adapter->mbox_lock);
4195 	return status;
4196 }
4197 
4198 /* When more than 1 NIC descriptor is present in the descriptor list,
4199  * the caller must specify the pf_num to obtain the NIC descriptor
4200  * corresponding to its pci function.
4201  * get_vft must be true when the caller wants the VF-template desc of the
4202  * PF-pool.
4203  * The pf_num should be set to PF_NUM_IGNORE when the caller knows
4204  * that only it's NIC descriptor is present in the descriptor list.
4205  */
4206 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
4207 					       bool get_vft, u8 pf_num)
4208 {
4209 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4210 	struct be_nic_res_desc *nic;
4211 	int i;
4212 
4213 	for (i = 0; i < desc_count; i++) {
4214 		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
4215 		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
4216 			nic = (struct be_nic_res_desc *)hdr;
4217 
4218 			if ((pf_num == PF_NUM_IGNORE ||
4219 			     nic->pf_num == pf_num) &&
4220 			    (!get_vft || nic->flags & BIT(VFT_SHIFT)))
4221 				return nic;
4222 		}
4223 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4224 		hdr = (void *)hdr + hdr->desc_len;
4225 	}
4226 	return NULL;
4227 }
4228 
4229 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
4230 					       u8 pf_num)
4231 {
4232 	return be_get_nic_desc(buf, desc_count, true, pf_num);
4233 }
4234 
4235 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
4236 						    u8 pf_num)
4237 {
4238 	return be_get_nic_desc(buf, desc_count, false, pf_num);
4239 }
4240 
4241 static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
4242 						 u8 pf_num)
4243 {
4244 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4245 	struct be_pcie_res_desc *pcie;
4246 	int i;
4247 
4248 	for (i = 0; i < desc_count; i++) {
4249 		if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
4250 		    hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
4251 			pcie = (struct be_pcie_res_desc *)hdr;
4252 			if (pcie->pf_num == pf_num)
4253 				return pcie;
4254 		}
4255 
4256 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4257 		hdr = (void *)hdr + hdr->desc_len;
4258 	}
4259 	return NULL;
4260 }
4261 
4262 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
4263 {
4264 	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
4265 	int i;
4266 
4267 	for (i = 0; i < desc_count; i++) {
4268 		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
4269 			return (struct be_port_res_desc *)hdr;
4270 
4271 		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
4272 		hdr = (void *)hdr + hdr->desc_len;
4273 	}
4274 	return NULL;
4275 }
4276 
4277 static void be_copy_nic_desc(struct be_resources *res,
4278 			     struct be_nic_res_desc *desc)
4279 {
4280 	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
4281 	res->max_vlans = le16_to_cpu(desc->vlan_count);
4282 	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
4283 	res->max_tx_qs = le16_to_cpu(desc->txq_count);
4284 	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
4285 	res->max_rx_qs = le16_to_cpu(desc->rq_count);
4286 	res->max_evt_qs = le16_to_cpu(desc->eq_count);
4287 	res->max_cq_count = le16_to_cpu(desc->cq_count);
4288 	res->max_iface_count = le16_to_cpu(desc->iface_count);
4289 	res->max_mcc_count = le16_to_cpu(desc->mcc_count);
4290 	/* Clear flags that driver is not interested in */
4291 	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
4292 				BE_IF_CAP_FLAGS_WANT;
4293 }
4294 
4295 /* Uses Mbox */
4296 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
4297 {
4298 	struct be_mcc_wrb *wrb;
4299 	struct be_cmd_req_get_func_config *req;
4300 	int status;
4301 	struct be_dma_mem cmd;
4302 
4303 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4304 		return -1;
4305 
4306 	memset(&cmd, 0, sizeof(struct be_dma_mem));
4307 	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
4308 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4309 				     GFP_ATOMIC);
4310 	if (!cmd.va) {
4311 		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
4312 		status = -ENOMEM;
4313 		goto err;
4314 	}
4315 
4316 	wrb = wrb_from_mbox(adapter);
4317 	if (!wrb) {
4318 		status = -EBUSY;
4319 		goto err;
4320 	}
4321 
4322 	req = cmd.va;
4323 
4324 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4325 			       OPCODE_COMMON_GET_FUNC_CONFIG,
4326 			       cmd.size, wrb, &cmd);
4327 
4328 	if (skyhawk_chip(adapter))
4329 		req->hdr.version = 1;
4330 
4331 	status = be_mbox_notify_wait(adapter);
4332 	if (!status) {
4333 		struct be_cmd_resp_get_func_config *resp = cmd.va;
4334 		u32 desc_count = le32_to_cpu(resp->desc_count);
4335 		struct be_nic_res_desc *desc;
4336 
4337 		/* GET_FUNC_CONFIG returns resource descriptors of the
4338 		 * current function only. So, pf_num should be set to
4339 		 * PF_NUM_IGNORE.
4340 		 */
4341 		desc = be_get_func_nic_desc(resp->func_param, desc_count,
4342 					    PF_NUM_IGNORE);
4343 		if (!desc) {
4344 			status = -EINVAL;
4345 			goto err;
4346 		}
4347 
4348 		/* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
4349 		adapter->pf_num = desc->pf_num;
4350 		adapter->vf_num = desc->vf_num;
4351 
4352 		if (res)
4353 			be_copy_nic_desc(res, desc);
4354 	}
4355 err:
4356 	mutex_unlock(&adapter->mbox_lock);
4357 	if (cmd.va)
4358 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4359 				  cmd.dma);
4360 	return status;
4361 }
4362 
4363 /* Will use MBOX only if MCCQ has not been created */
4364 int be_cmd_get_profile_config(struct be_adapter *adapter,
4365 			      struct be_resources *res, u8 query, u8 domain)
4366 {
4367 	struct be_cmd_resp_get_profile_config *resp;
4368 	struct be_cmd_req_get_profile_config *req;
4369 	struct be_nic_res_desc *vf_res;
4370 	struct be_pcie_res_desc *pcie;
4371 	struct be_port_res_desc *port;
4372 	struct be_nic_res_desc *nic;
4373 	struct be_mcc_wrb wrb = {0};
4374 	struct be_dma_mem cmd;
4375 	u16 desc_count;
4376 	int status;
4377 
4378 	memset(&cmd, 0, sizeof(struct be_dma_mem));
4379 	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
4380 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4381 				     GFP_ATOMIC);
4382 	if (!cmd.va)
4383 		return -ENOMEM;
4384 
4385 	req = cmd.va;
4386 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4387 			       OPCODE_COMMON_GET_PROFILE_CONFIG,
4388 			       cmd.size, &wrb, &cmd);
4389 
4390 	if (!lancer_chip(adapter))
4391 		req->hdr.version = 1;
4392 	req->type = ACTIVE_PROFILE_TYPE;
4393 	req->hdr.domain = domain;
4394 
4395 	/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
4396 	 * descriptors with all bits set to "1" for the fields which can be
4397 	 * modified using SET_PROFILE_CONFIG cmd.
4398 	 */
4399 	if (query == RESOURCE_MODIFIABLE)
4400 		req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;
4401 
4402 	status = be_cmd_notify_wait(adapter, &wrb);
4403 	if (status)
4404 		goto err;
4405 
4406 	resp = cmd.va;
4407 	desc_count = le16_to_cpu(resp->desc_count);
4408 
4409 	pcie = be_get_pcie_desc(resp->func_param, desc_count,
4410 				adapter->pf_num);
4411 	if (pcie)
4412 		res->max_vfs = le16_to_cpu(pcie->num_vfs);
4413 
4414 	port = be_get_port_desc(resp->func_param, desc_count);
4415 	if (port)
4416 		adapter->mc_type = port->mc_type;
4417 
4418 	nic = be_get_func_nic_desc(resp->func_param, desc_count,
4419 				   adapter->pf_num);
4420 	if (nic)
4421 		be_copy_nic_desc(res, nic);
4422 
4423 	vf_res = be_get_vft_desc(resp->func_param, desc_count,
4424 				 adapter->pf_num);
4425 	if (vf_res)
4426 		res->vf_if_cap_flags = vf_res->cap_flags;
4427 err:
4428 	if (cmd.va)
4429 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4430 				  cmd.dma);
4431 	return status;
4432 }
4433 
4434 /* Will use MBOX only if MCCQ has not been created */
4435 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
4436 				     int size, int count, u8 version, u8 domain)
4437 {
4438 	struct be_cmd_req_set_profile_config *req;
4439 	struct be_mcc_wrb wrb = {0};
4440 	struct be_dma_mem cmd;
4441 	int status;
4442 
4443 	memset(&cmd, 0, sizeof(struct be_dma_mem));
4444 	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
4445 	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
4446 				     GFP_ATOMIC);
4447 	if (!cmd.va)
4448 		return -ENOMEM;
4449 
4450 	req = cmd.va;
4451 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4452 			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
4453 			       &wrb, &cmd);
4454 	req->hdr.version = version;
4455 	req->hdr.domain = domain;
4456 	req->desc_count = cpu_to_le32(count);
4457 	memcpy(req->desc, desc, size);
4458 
4459 	status = be_cmd_notify_wait(adapter, &wrb);
4460 
4461 	if (cmd.va)
4462 		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
4463 				  cmd.dma);
4464 	return status;
4465 }
4466 
4467 /* Mark all fields invalid */
4468 static void be_reset_nic_desc(struct be_nic_res_desc *nic)
4469 {
4470 	memset(nic, 0, sizeof(*nic));
4471 	nic->unicast_mac_count = 0xFFFF;
4472 	nic->mcc_count = 0xFFFF;
4473 	nic->vlan_count = 0xFFFF;
4474 	nic->mcast_mac_count = 0xFFFF;
4475 	nic->txq_count = 0xFFFF;
4476 	nic->rq_count = 0xFFFF;
4477 	nic->rssq_count = 0xFFFF;
4478 	nic->lro_count = 0xFFFF;
4479 	nic->cq_count = 0xFFFF;
4480 	nic->toe_conn_count = 0xFFFF;
4481 	nic->eq_count = 0xFFFF;
4482 	nic->iface_count = 0xFFFF;
4483 	nic->link_param = 0xFF;
4484 	nic->channel_id_param = cpu_to_le16(0xF000);
4485 	nic->acpi_params = 0xFF;
4486 	nic->wol_param = 0x0F;
4487 	nic->tunnel_iface_count = 0xFFFF;
4488 	nic->direct_tenant_iface_count = 0xFFFF;
4489 	nic->bw_min = 0xFFFFFFFF;
4490 	nic->bw_max = 0xFFFFFFFF;
4491 }
4492 
4493 /* Mark all fields invalid */
4494 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
4495 {
4496 	memset(pcie, 0, sizeof(*pcie));
4497 	pcie->sriov_state = 0xFF;
4498 	pcie->pf_state = 0xFF;
4499 	pcie->pf_type = 0xFF;
4500 	pcie->num_vfs = 0xFFFF;
4501 }
4502 
4503 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
4504 		      u8 domain)
4505 {
4506 	struct be_nic_res_desc nic_desc;
4507 	u32 bw_percent;
4508 	u16 version = 0;
4509 
4510 	if (BE3_chip(adapter))
4511 		return be_cmd_set_qos(adapter, max_rate / 10, domain);
4512 
4513 	be_reset_nic_desc(&nic_desc);
4514 	nic_desc.pf_num = adapter->pf_num;
4515 	nic_desc.vf_num = domain;
4516 	nic_desc.bw_min = 0;
4517 	if (lancer_chip(adapter)) {
4518 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
4519 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
4520 		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
4521 					(1 << NOSV_SHIFT);
4522 		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
4523 	} else {
4524 		version = 1;
4525 		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4526 		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4527 		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4528 		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
4529 		nic_desc.bw_max = cpu_to_le32(bw_percent);
4530 	}
4531 
4532 	return be_cmd_set_profile_config(adapter, &nic_desc,
4533 					 nic_desc.hdr.desc_len,
4534 					 1, version, domain);
4535 }
4536 
4537 static void be_fill_vf_res_template(struct be_adapter *adapter,
4538 				    struct be_resources pool_res,
4539 				    u16 num_vfs, u16 num_vf_qs,
4540 				    struct be_nic_res_desc *nic_vft)
4541 {
4542 	u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
4543 	struct be_resources res_mod = {0};
4544 
4545 	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4546 	 * which are modifiable using SET_PROFILE_CONFIG cmd.
4547 	 */
4548 	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
4549 
4550 	/* If RSS IFACE capability flags are modifiable for a VF, set the
4551 	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4552 	 * more than 1 RSSQ is available for a VF.
4553 	 * Otherwise, provision only 1 queue pair for VF.
4554 	 */
4555 	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4556 		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4557 		if (num_vf_qs > 1) {
4558 			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4559 			if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4560 				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4561 		} else {
4562 			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4563 					     BE_IF_FLAGS_DEFQ_RSS);
4564 		}
4565 	} else {
4566 		num_vf_qs = 1;
4567 	}
4568 
4569 	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4570 		nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4571 		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4572 	}
4573 
4574 	nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
4575 	nic_vft->rq_count = cpu_to_le16(num_vf_qs);
4576 	nic_vft->txq_count = cpu_to_le16(num_vf_qs);
4577 	nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
4578 	nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
4579 					(num_vfs + 1));
4580 
4581 	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4582 	 * among the PF and it's VFs, if the fields are changeable
4583 	 */
4584 	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4585 		nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
4586 							 (num_vfs + 1));
4587 
4588 	if (res_mod.max_vlans == FIELD_MODIFIABLE)
4589 		nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
4590 						  (num_vfs + 1));
4591 
4592 	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4593 		nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
4594 						   (num_vfs + 1));
4595 
4596 	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4597 		nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
4598 						 (num_vfs + 1));
4599 }
4600 
4601 int be_cmd_set_sriov_config(struct be_adapter *adapter,
4602 			    struct be_resources pool_res, u16 num_vfs,
4603 			    u16 num_vf_qs)
4604 {
4605 	struct {
4606 		struct be_pcie_res_desc pcie;
4607 		struct be_nic_res_desc nic_vft;
4608 	} __packed desc;
4609 
4610 	/* PF PCIE descriptor */
4611 	be_reset_pcie_desc(&desc.pcie);
4612 	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
4613 	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4614 	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
4615 	desc.pcie.pf_num = adapter->pdev->devfn;
4616 	desc.pcie.sriov_state = num_vfs ? 1 : 0;
4617 	desc.pcie.num_vfs = cpu_to_le16(num_vfs);
4618 
4619 	/* VF NIC Template descriptor */
4620 	be_reset_nic_desc(&desc.nic_vft);
4621 	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
4622 	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4623 	desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
4624 	desc.nic_vft.pf_num = adapter->pdev->devfn;
4625 	desc.nic_vft.vf_num = 0;
4626 
4627 	be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
4628 				&desc.nic_vft);
4629 
4630 	return be_cmd_set_profile_config(adapter, &desc,
4631 					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
4632 }
4633 
4634 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
4635 {
4636 	struct be_mcc_wrb *wrb;
4637 	struct be_cmd_req_manage_iface_filters *req;
4638 	int status;
4639 
4640 	if (iface == 0xFFFFFFFF)
4641 		return -1;
4642 
4643 	spin_lock_bh(&adapter->mcc_lock);
4644 
4645 	wrb = wrb_from_mccq(adapter);
4646 	if (!wrb) {
4647 		status = -EBUSY;
4648 		goto err;
4649 	}
4650 	req = embedded_payload(wrb);
4651 
4652 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4653 			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
4654 			       wrb, NULL);
4655 	req->op = op;
4656 	req->target_iface_id = cpu_to_le32(iface);
4657 
4658 	status = be_mcc_notify_wait(adapter);
4659 err:
4660 	spin_unlock_bh(&adapter->mcc_lock);
4661 	return status;
4662 }
4663 
4664 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
4665 {
4666 	struct be_port_res_desc port_desc;
4667 
4668 	memset(&port_desc, 0, sizeof(port_desc));
4669 	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
4670 	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
4671 	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
4672 	port_desc.link_num = adapter->hba_port_num;
4673 	if (port) {
4674 		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
4675 					(1 << RCVID_SHIFT);
4676 		port_desc.nv_port = swab16(port);
4677 	} else {
4678 		port_desc.nv_flags = NV_TYPE_DISABLED;
4679 		port_desc.nv_port = 0;
4680 	}
4681 
4682 	return be_cmd_set_profile_config(adapter, &port_desc,
4683 					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
4684 }
4685 
4686 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
4687 		     int vf_num)
4688 {
4689 	struct be_mcc_wrb *wrb;
4690 	struct be_cmd_req_get_iface_list *req;
4691 	struct be_cmd_resp_get_iface_list *resp;
4692 	int status;
4693 
4694 	spin_lock_bh(&adapter->mcc_lock);
4695 
4696 	wrb = wrb_from_mccq(adapter);
4697 	if (!wrb) {
4698 		status = -EBUSY;
4699 		goto err;
4700 	}
4701 	req = embedded_payload(wrb);
4702 
4703 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4704 			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
4705 			       wrb, NULL);
4706 	req->hdr.domain = vf_num + 1;
4707 
4708 	status = be_mcc_notify_wait(adapter);
4709 	if (!status) {
4710 		resp = (struct be_cmd_resp_get_iface_list *)req;
4711 		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
4712 	}
4713 
4714 err:
4715 	spin_unlock_bh(&adapter->mcc_lock);
4716 	return status;
4717 }
4718 
4719 static int lancer_wait_idle(struct be_adapter *adapter)
4720 {
4721 #define SLIPORT_IDLE_TIMEOUT 30
4722 	u32 reg_val;
4723 	int status = 0, i;
4724 
4725 	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
4726 		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
4727 		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
4728 			break;
4729 
4730 		ssleep(1);
4731 	}
4732 
4733 	if (i == SLIPORT_IDLE_TIMEOUT)
4734 		status = -1;
4735 
4736 	return status;
4737 }
4738 
4739 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
4740 {
4741 	int status = 0;
4742 
4743 	status = lancer_wait_idle(adapter);
4744 	if (status)
4745 		return status;
4746 
4747 	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
4748 
4749 	return status;
4750 }
4751 
4752 /* Routine to check whether dump image is present or not */
4753 bool dump_present(struct be_adapter *adapter)
4754 {
4755 	u32 sliport_status = 0;
4756 
4757 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
4758 	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
4759 }
4760 
4761 int lancer_initiate_dump(struct be_adapter *adapter)
4762 {
4763 	struct device *dev = &adapter->pdev->dev;
4764 	int status;
4765 
4766 	if (dump_present(adapter)) {
4767 		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
4768 		return -EEXIST;
4769 	}
4770 
4771 	/* give firmware reset and diagnostic dump */
4772 	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
4773 				     PHYSDEV_CONTROL_DD_MASK);
4774 	if (status < 0) {
4775 		dev_err(dev, "FW reset failed\n");
4776 		return status;
4777 	}
4778 
4779 	status = lancer_wait_idle(adapter);
4780 	if (status)
4781 		return status;
4782 
4783 	if (!dump_present(adapter)) {
4784 		dev_err(dev, "FW dump not generated\n");
4785 		return -EIO;
4786 	}
4787 
4788 	return 0;
4789 }
4790 
4791 int lancer_delete_dump(struct be_adapter *adapter)
4792 {
4793 	int status;
4794 
4795 	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
4796 	return be_cmd_status(status);
4797 }
4798 
4799 /* Uses sync mcc */
4800 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
4801 {
4802 	struct be_mcc_wrb *wrb;
4803 	struct be_cmd_enable_disable_vf *req;
4804 	int status;
4805 
4806 	if (BEx_chip(adapter))
4807 		return 0;
4808 
4809 	spin_lock_bh(&adapter->mcc_lock);
4810 
4811 	wrb = wrb_from_mccq(adapter);
4812 	if (!wrb) {
4813 		status = -EBUSY;
4814 		goto err;
4815 	}
4816 
4817 	req = embedded_payload(wrb);
4818 
4819 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4820 			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
4821 			       wrb, NULL);
4822 
4823 	req->hdr.domain = domain;
4824 	req->enable = 1;
4825 	status = be_mcc_notify_wait(adapter);
4826 err:
4827 	spin_unlock_bh(&adapter->mcc_lock);
4828 	return status;
4829 }
4830 
4831 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
4832 {
4833 	struct be_mcc_wrb *wrb;
4834 	struct be_cmd_req_intr_set *req;
4835 	int status;
4836 
4837 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4838 		return -1;
4839 
4840 	wrb = wrb_from_mbox(adapter);
4841 
4842 	req = embedded_payload(wrb);
4843 
4844 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4845 			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
4846 			       wrb, NULL);
4847 
4848 	req->intr_enabled = intr_enable;
4849 
4850 	status = be_mbox_notify_wait(adapter);
4851 
4852 	mutex_unlock(&adapter->mbox_lock);
4853 	return status;
4854 }
4855 
4856 /* Uses MBOX */
4857 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
4858 {
4859 	struct be_cmd_req_get_active_profile *req;
4860 	struct be_mcc_wrb *wrb;
4861 	int status;
4862 
4863 	if (mutex_lock_interruptible(&adapter->mbox_lock))
4864 		return -1;
4865 
4866 	wrb = wrb_from_mbox(adapter);
4867 	if (!wrb) {
4868 		status = -EBUSY;
4869 		goto err;
4870 	}
4871 
4872 	req = embedded_payload(wrb);
4873 
4874 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4875 			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
4876 			       wrb, NULL);
4877 
4878 	status = be_mbox_notify_wait(adapter);
4879 	if (!status) {
4880 		struct be_cmd_resp_get_active_profile *resp =
4881 							embedded_payload(wrb);
4882 
4883 		*profile_id = le16_to_cpu(resp->active_profile_id);
4884 	}
4885 
4886 err:
4887 	mutex_unlock(&adapter->mbox_lock);
4888 	return status;
4889 }
4890 
4891 int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
4892 				     int link_state, int version, u8 domain)
4893 {
4894 	struct be_mcc_wrb *wrb;
4895 	struct be_cmd_req_set_ll_link *req;
4896 	int status;
4897 
4898 	spin_lock_bh(&adapter->mcc_lock);
4899 
4900 	wrb = wrb_from_mccq(adapter);
4901 	if (!wrb) {
4902 		status = -EBUSY;
4903 		goto err;
4904 	}
4905 
4906 	req = embedded_payload(wrb);
4907 
4908 	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
4909 			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
4910 			       sizeof(*req), wrb, NULL);
4911 
4912 	req->hdr.version = version;
4913 	req->hdr.domain = domain;
4914 
4915 	if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
4916 	    link_state == IFLA_VF_LINK_STATE_AUTO)
4917 		req->link_config |= PLINK_ENABLE;
4918 
4919 	if (link_state == IFLA_VF_LINK_STATE_AUTO)
4920 		req->link_config |= PLINK_TRACK;
4921 
4922 	status = be_mcc_notify_wait(adapter);
4923 err:
4924 	spin_unlock_bh(&adapter->mcc_lock);
4925 	return status;
4926 }
4927 
4928 int be_cmd_set_logical_link_config(struct be_adapter *adapter,
4929 				   int link_state, u8 domain)
4930 {
4931 	int status;
4932 
4933 	if (BEx_chip(adapter))
4934 		return -EOPNOTSUPP;
4935 
4936 	status = __be_cmd_set_logical_link_config(adapter, link_state,
4937 						  2, domain);
4938 
4939 	/* Version 2 of the command will not be recognized by older FW.
4940 	 * On such a failure issue version 1 of the command.
4941 	 */
4942 	if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
4943 		status = __be_cmd_set_logical_link_config(adapter, link_state,
4944 							  1, domain);
4945 	return status;
4946 }
4947 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
4948 		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
4949 {
4950 	struct be_adapter *adapter = netdev_priv(netdev_handle);
4951 	struct be_mcc_wrb *wrb;
4952 	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
4953 	struct be_cmd_req_hdr *req;
4954 	struct be_cmd_resp_hdr *resp;
4955 	int status;
4956 
4957 	spin_lock_bh(&adapter->mcc_lock);
4958 
4959 	wrb = wrb_from_mccq(adapter);
4960 	if (!wrb) {
4961 		status = -EBUSY;
4962 		goto err;
4963 	}
4964 	req = embedded_payload(wrb);
4965 	resp = embedded_payload(wrb);
4966 
4967 	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
4968 			       hdr->opcode, wrb_payload_size, wrb, NULL);
4969 	memcpy(req, wrb_payload, wrb_payload_size);
4970 	be_dws_cpu_to_le(req, wrb_payload_size);
4971 
4972 	status = be_mcc_notify_wait(adapter);
4973 	if (cmd_status)
4974 		*cmd_status = (status & 0xffff);
4975 	if (ext_status)
4976 		*ext_status = 0;
4977 	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
4978 	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
4979 err:
4980 	spin_unlock_bh(&adapter->mcc_lock);
4981 	return status;
4982 }
4983 EXPORT_SYMBOL(be_roce_mcc_cmd);
4984