xref: /openbmc/linux/drivers/scsi/lpfc/lpfc_sli.c (revision 48c926cd)
1 
2 /*******************************************************************
3  * This file is part of the Emulex Linux Device Driver for         *
4  * Fibre Channel Host Bus Adapters.                                *
5  * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
6  * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
7  * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
8  * EMULEX and SLI are trademarks of Emulex.                        *
9  * www.broadcom.com                                                *
10  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
11  *                                                                 *
12  * This program is free software; you can redistribute it and/or   *
13  * modify it under the terms of version 2 of the GNU General       *
14  * Public License as published by the Free Software Foundation.    *
15  * This program is distributed in the hope that it will be useful. *
16  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
17  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
18  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
19  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
20  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
21  * more details, a copy of which can be found in the file COPYING  *
22  * included with this package.                                     *
23  *******************************************************************/
24 
25 #include <linux/blkdev.h>
26 #include <linux/pci.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/slab.h>
30 #include <linux/lockdep.h>
31 
32 #include <scsi/scsi.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <scsi/fc/fc_fs.h>
38 #include <linux/aer.h>
39 
40 #include <linux/nvme-fc-driver.h>
41 
42 #include "lpfc_hw4.h"
43 #include "lpfc_hw.h"
44 #include "lpfc_sli.h"
45 #include "lpfc_sli4.h"
46 #include "lpfc_nl.h"
47 #include "lpfc_disc.h"
48 #include "lpfc.h"
49 #include "lpfc_scsi.h"
50 #include "lpfc_nvme.h"
51 #include "lpfc_nvmet.h"
52 #include "lpfc_crtn.h"
53 #include "lpfc_logmsg.h"
54 #include "lpfc_compat.h"
55 #include "lpfc_debugfs.h"
56 #include "lpfc_vport.h"
57 #include "lpfc_version.h"
58 
59 /* There are only four IOCB completion types. */
60 typedef enum _lpfc_iocb_type {
61 	LPFC_UNKNOWN_IOCB,
62 	LPFC_UNSOL_IOCB,
63 	LPFC_SOL_IOCB,
64 	LPFC_ABORT_IOCB
65 } lpfc_iocb_type;
66 
67 
68 /* Provide function prototypes local to this module. */
69 static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
70 				  uint32_t);
71 static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
72 			      uint8_t *, uint32_t *);
73 static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
74 							 struct lpfc_iocbq *);
75 static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
76 				      struct hbq_dmabuf *);
77 static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
78 					  struct hbq_dmabuf *dmabuf);
79 static int lpfc_sli4_fp_handle_cqe(struct lpfc_hba *, struct lpfc_queue *,
80 				    struct lpfc_cqe *);
81 static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
82 				       int);
83 static int lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
84 				    struct lpfc_eqe *eqe, uint32_t qidx);
85 static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
86 static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
87 static int lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba,
88 				   struct lpfc_sli_ring *pring,
89 				   struct lpfc_iocbq *cmdiocb);
90 
91 static IOCB_t *
92 lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
93 {
94 	return &iocbq->iocb;
95 }
96 
97 /**
98  * lpfc_sli4_wq_put - Put a Work Queue Entry on an Work Queue
99  * @q: The Work Queue to operate on.
100  * @wqe: The work Queue Entry to put on the Work queue.
101  *
102  * This routine will copy the contents of @wqe to the next available entry on
103  * the @q. This function will then ring the Work Queue Doorbell to signal the
104  * HBA to start processing the Work Queue Entry. This function returns 0 if
105  * successful. If no entries are available on @q then this function will return
106  * -ENOMEM.
107  * The caller is expected to hold the hbalock when calling this routine.
108  **/
109 static int
110 lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
111 {
112 	union lpfc_wqe *temp_wqe;
113 	struct lpfc_register doorbell;
114 	uint32_t host_index;
115 	uint32_t idx;
116 
117 	/* sanity check on queue memory */
118 	if (unlikely(!q))
119 		return -ENOMEM;
120 	temp_wqe = q->qe[q->host_index].wqe;
121 
122 	/* If the host has not yet processed the next entry then we are done */
123 	idx = ((q->host_index + 1) % q->entry_count);
124 	if (idx == q->hba_index) {
125 		q->WQ_overflow++;
126 		return -EBUSY;
127 	}
128 	q->WQ_posted++;
129 	/* set consumption flag every once in a while */
130 	if (!((q->host_index + 1) % q->entry_repost))
131 		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
132 	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
133 		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
134 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
135 	/* ensure WQE bcopy flushed before doorbell write */
136 	wmb();
137 
138 	/* Update the host index before invoking device */
139 	host_index = q->host_index;
140 
141 	q->host_index = idx;
142 
143 	/* Ring Doorbell */
144 	doorbell.word0 = 0;
145 	if (q->db_format == LPFC_DB_LIST_FORMAT) {
146 		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
147 		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
148 		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
149 	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
150 		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
151 		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
152 	} else {
153 		return -EINVAL;
154 	}
155 	writel(doorbell.word0, q->db_regaddr);
156 
157 	return 0;
158 }
159 
160 /**
161  * lpfc_sli4_wq_release - Updates internal hba index for WQ
162  * @q: The Work Queue to operate on.
163  * @index: The index to advance the hba index to.
164  *
165  * This routine will update the HBA index of a queue to reflect consumption of
166  * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
167  * an entry the host calls this function to update the queue's internal
168  * pointers. This routine returns the number of entries that were consumed by
169  * the HBA.
170  **/
171 static uint32_t
172 lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
173 {
174 	uint32_t released = 0;
175 
176 	/* sanity check on queue memory */
177 	if (unlikely(!q))
178 		return 0;
179 
180 	if (q->hba_index == index)
181 		return 0;
182 	do {
183 		q->hba_index = ((q->hba_index + 1) % q->entry_count);
184 		released++;
185 	} while (q->hba_index != index);
186 	return released;
187 }
188 
189 /**
190  * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on an Mailbox Queue
191  * @q: The Mailbox Queue to operate on.
192  * @wqe: The Mailbox Queue Entry to put on the Work queue.
193  *
194  * This routine will copy the contents of @mqe to the next available entry on
195  * the @q. This function will then ring the Work Queue Doorbell to signal the
196  * HBA to start processing the Work Queue Entry. This function returns 0 if
197  * successful. If no entries are available on @q then this function will return
198  * -ENOMEM.
199  * The caller is expected to hold the hbalock when calling this routine.
200  **/
201 static uint32_t
202 lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
203 {
204 	struct lpfc_mqe *temp_mqe;
205 	struct lpfc_register doorbell;
206 
207 	/* sanity check on queue memory */
208 	if (unlikely(!q))
209 		return -ENOMEM;
210 	temp_mqe = q->qe[q->host_index].mqe;
211 
212 	/* If the host has not yet processed the next entry then we are done */
213 	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
214 		return -ENOMEM;
215 	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
216 	/* Save off the mailbox pointer for completion */
217 	q->phba->mbox = (MAILBOX_t *)temp_mqe;
218 
219 	/* Update the host index before invoking device */
220 	q->host_index = ((q->host_index + 1) % q->entry_count);
221 
222 	/* Ring Doorbell */
223 	doorbell.word0 = 0;
224 	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
225 	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
226 	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
227 	return 0;
228 }
229 
230 /**
231  * lpfc_sli4_mq_release - Updates internal hba index for MQ
232  * @q: The Mailbox Queue to operate on.
233  *
234  * This routine will update the HBA index of a queue to reflect consumption of
235  * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
236  * an entry the host calls this function to update the queue's internal
237  * pointers. This routine returns the number of entries that were consumed by
238  * the HBA.
239  **/
240 static uint32_t
241 lpfc_sli4_mq_release(struct lpfc_queue *q)
242 {
243 	/* sanity check on queue memory */
244 	if (unlikely(!q))
245 		return 0;
246 
247 	/* Clear the mailbox pointer for completion */
248 	q->phba->mbox = NULL;
249 	q->hba_index = ((q->hba_index + 1) % q->entry_count);
250 	return 1;
251 }
252 
253 /**
254  * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
255  * @q: The Event Queue to get the first valid EQE from
256  *
257  * This routine will get the first valid Event Queue Entry from @q, update
258  * the queue's internal hba index, and return the EQE. If no valid EQEs are in
259  * the Queue (no more work to do), or the Queue is full of EQEs that have been
260  * processed, but not popped back to the HBA then this routine will return NULL.
261  **/
262 static struct lpfc_eqe *
263 lpfc_sli4_eq_get(struct lpfc_queue *q)
264 {
265 	struct lpfc_eqe *eqe;
266 	uint32_t idx;
267 
268 	/* sanity check on queue memory */
269 	if (unlikely(!q))
270 		return NULL;
271 	eqe = q->qe[q->hba_index].eqe;
272 
273 	/* If the next EQE is not valid then we are done */
274 	if (!bf_get_le32(lpfc_eqe_valid, eqe))
275 		return NULL;
276 	/* If the host has not yet processed the next entry then we are done */
277 	idx = ((q->hba_index + 1) % q->entry_count);
278 	if (idx == q->host_index)
279 		return NULL;
280 
281 	q->hba_index = idx;
282 
283 	/*
284 	 * insert barrier for instruction interlock : data from the hardware
285 	 * must have the valid bit checked before it can be copied and acted
286 	 * upon. Speculative instructions were allowing a bcopy at the start
287 	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
288 	 * after our return, to copy data before the valid bit check above
289 	 * was done. As such, some of the copied data was stale. The barrier
290 	 * ensures the check is before any data is copied.
291 	 */
292 	mb();
293 	return eqe;
294 }
295 
296 /**
297  * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
298  * @q: The Event Queue to disable interrupts
299  *
300  **/
301 static inline void
302 lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
303 {
304 	struct lpfc_register doorbell;
305 
306 	doorbell.word0 = 0;
307 	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
308 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
309 	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
310 		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
311 	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
312 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
313 }
314 
315 /**
316  * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
317  * @q: The Event Queue that the host has completed processing for.
318  * @arm: Indicates whether the host wants to arms this CQ.
319  *
320  * This routine will mark all Event Queue Entries on @q, from the last
321  * known completed entry to the last entry that was processed, as completed
322  * by clearing the valid bit for each completion queue entry. Then it will
323  * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
324  * The internal host index in the @q will be updated by this routine to indicate
325  * that the host has finished processing the entries. The @arm parameter
326  * indicates that the queue should be rearmed when ringing the doorbell.
327  *
328  * This function will return the number of EQEs that were popped.
329  **/
330 uint32_t
331 lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
332 {
333 	uint32_t released = 0;
334 	struct lpfc_eqe *temp_eqe;
335 	struct lpfc_register doorbell;
336 
337 	/* sanity check on queue memory */
338 	if (unlikely(!q))
339 		return 0;
340 
341 	/* while there are valid entries */
342 	while (q->hba_index != q->host_index) {
343 		temp_eqe = q->qe[q->host_index].eqe;
344 		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
345 		released++;
346 		q->host_index = ((q->host_index + 1) % q->entry_count);
347 	}
348 	if (unlikely(released == 0 && !arm))
349 		return 0;
350 
351 	/* ring doorbell for number popped */
352 	doorbell.word0 = 0;
353 	if (arm) {
354 		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
355 		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
356 	}
357 	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
358 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
359 	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
360 			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
361 	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
362 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
363 	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
364 	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
365 		readl(q->phba->sli4_hba.EQCQDBregaddr);
366 	return released;
367 }
368 
369 /**
370  * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
371  * @q: The Completion Queue to get the first valid CQE from
372  *
373  * This routine will get the first valid Completion Queue Entry from @q, update
374  * the queue's internal hba index, and return the CQE. If no valid CQEs are in
375  * the Queue (no more work to do), or the Queue is full of CQEs that have been
376  * processed, but not popped back to the HBA then this routine will return NULL.
377  **/
378 static struct lpfc_cqe *
379 lpfc_sli4_cq_get(struct lpfc_queue *q)
380 {
381 	struct lpfc_cqe *cqe;
382 	uint32_t idx;
383 
384 	/* sanity check on queue memory */
385 	if (unlikely(!q))
386 		return NULL;
387 
388 	/* If the next CQE is not valid then we are done */
389 	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
390 		return NULL;
391 	/* If the host has not yet processed the next entry then we are done */
392 	idx = ((q->hba_index + 1) % q->entry_count);
393 	if (idx == q->host_index)
394 		return NULL;
395 
396 	cqe = q->qe[q->hba_index].cqe;
397 	q->hba_index = idx;
398 
399 	/*
400 	 * insert barrier for instruction interlock : data from the hardware
401 	 * must have the valid bit checked before it can be copied and acted
402 	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
403 	 * instructions allowing action on content before valid bit checked,
404 	 * add barrier here as well. May not be needed as "content" is a
405 	 * single 32-bit entity here (vs multi word structure for cq's).
406 	 */
407 	mb();
408 	return cqe;
409 }
410 
411 /**
412  * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
413  * @q: The Completion Queue that the host has completed processing for.
414  * @arm: Indicates whether the host wants to arms this CQ.
415  *
416  * This routine will mark all Completion queue entries on @q, from the last
417  * known completed entry to the last entry that was processed, as completed
418  * by clearing the valid bit for each completion queue entry. Then it will
419  * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
420  * The internal host index in the @q will be updated by this routine to indicate
421  * that the host has finished processing the entries. The @arm parameter
422  * indicates that the queue should be rearmed when ringing the doorbell.
423  *
424  * This function will return the number of CQEs that were released.
425  **/
426 uint32_t
427 lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
428 {
429 	uint32_t released = 0;
430 	struct lpfc_cqe *temp_qe;
431 	struct lpfc_register doorbell;
432 
433 	/* sanity check on queue memory */
434 	if (unlikely(!q))
435 		return 0;
436 	/* while there are valid entries */
437 	while (q->hba_index != q->host_index) {
438 		temp_qe = q->qe[q->host_index].cqe;
439 		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
440 		released++;
441 		q->host_index = ((q->host_index + 1) % q->entry_count);
442 	}
443 	if (unlikely(released == 0 && !arm))
444 		return 0;
445 
446 	/* ring doorbell for number popped */
447 	doorbell.word0 = 0;
448 	if (arm)
449 		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
450 	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
451 	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
452 	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
453 			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
454 	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
455 	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
456 	return released;
457 }
458 
459 /**
460  * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
461  * @q: The Header Receive Queue to operate on.
462  * @wqe: The Receive Queue Entry to put on the Receive queue.
463  *
464  * This routine will copy the contents of @wqe to the next available entry on
465  * the @q. This function will then ring the Receive Queue Doorbell to signal the
466  * HBA to start processing the Receive Queue Entry. This function returns the
467  * index that the rqe was copied to if successful. If no entries are available
468  * on @q then this function will return -ENOMEM.
469  * The caller is expected to hold the hbalock when calling this routine.
470  **/
471 int
472 lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
473 		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
474 {
475 	struct lpfc_rqe *temp_hrqe;
476 	struct lpfc_rqe *temp_drqe;
477 	struct lpfc_register doorbell;
478 	int put_index;
479 
480 	/* sanity check on queue memory */
481 	if (unlikely(!hq) || unlikely(!dq))
482 		return -ENOMEM;
483 	put_index = hq->host_index;
484 	temp_hrqe = hq->qe[put_index].rqe;
485 	temp_drqe = dq->qe[dq->host_index].rqe;
486 
487 	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
488 		return -EINVAL;
489 	if (put_index != dq->host_index)
490 		return -EINVAL;
491 	/* If the host has not yet processed the next entry then we are done */
492 	if (((put_index + 1) % hq->entry_count) == hq->hba_index)
493 		return -EBUSY;
494 	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
495 	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
496 
497 	/* Update the host index to point to the next slot */
498 	hq->host_index = ((put_index + 1) % hq->entry_count);
499 	dq->host_index = ((dq->host_index + 1) % dq->entry_count);
500 	hq->RQ_buf_posted++;
501 
502 	/* Ring The Header Receive Queue Doorbell */
503 	if (!(hq->host_index % hq->entry_repost)) {
504 		doorbell.word0 = 0;
505 		if (hq->db_format == LPFC_DB_RING_FORMAT) {
506 			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
507 			       hq->entry_repost);
508 			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
509 		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
510 			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
511 			       hq->entry_repost);
512 			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
513 			       hq->host_index);
514 			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
515 		} else {
516 			return -EINVAL;
517 		}
518 		writel(doorbell.word0, hq->db_regaddr);
519 	}
520 	return put_index;
521 }
522 
523 /**
524  * lpfc_sli4_rq_release - Updates internal hba index for RQ
525  * @q: The Header Receive Queue to operate on.
526  *
527  * This routine will update the HBA index of a queue to reflect consumption of
528  * one Receive Queue Entry by the HBA. When the HBA indicates that it has
529  * consumed an entry the host calls this function to update the queue's
530  * internal pointers. This routine returns the number of entries that were
531  * consumed by the HBA.
532  **/
533 static uint32_t
534 lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
535 {
536 	/* sanity check on queue memory */
537 	if (unlikely(!hq) || unlikely(!dq))
538 		return 0;
539 
540 	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
541 		return 0;
542 	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
543 	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
544 	return 1;
545 }
546 
547 /**
548  * lpfc_cmd_iocb - Get next command iocb entry in the ring
549  * @phba: Pointer to HBA context object.
550  * @pring: Pointer to driver SLI ring object.
551  *
552  * This function returns pointer to next command iocb entry
553  * in the command ring. The caller must hold hbalock to prevent
554  * other threads consume the next command iocb.
555  * SLI-2/SLI-3 provide different sized iocbs.
556  **/
557 static inline IOCB_t *
558 lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
559 {
560 	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
561 			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
562 }
563 
564 /**
565  * lpfc_resp_iocb - Get next response iocb entry in the ring
566  * @phba: Pointer to HBA context object.
567  * @pring: Pointer to driver SLI ring object.
568  *
569  * This function returns pointer to next response iocb entry
570  * in the response ring. The caller must hold hbalock to make sure
571  * that no other thread consume the next response iocb.
572  * SLI-2/SLI-3 provide different sized iocbs.
573  **/
574 static inline IOCB_t *
575 lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
576 {
577 	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
578 			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
579 }
580 
581 /**
582  * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
583  * @phba: Pointer to HBA context object.
584  *
585  * This function is called with hbalock held. This function
586  * allocates a new driver iocb object from the iocb pool. If the
587  * allocation is successful, it returns pointer to the newly
588  * allocated iocb object else it returns NULL.
589  **/
590 struct lpfc_iocbq *
591 __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
592 {
593 	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
594 	struct lpfc_iocbq * iocbq = NULL;
595 
596 	lockdep_assert_held(&phba->hbalock);
597 
598 	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
599 	if (iocbq)
600 		phba->iocb_cnt++;
601 	if (phba->iocb_cnt > phba->iocb_max)
602 		phba->iocb_max = phba->iocb_cnt;
603 	return iocbq;
604 }
605 
606 /**
607  * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
608  * @phba: Pointer to HBA context object.
609  * @xritag: XRI value.
610  *
611  * This function clears the sglq pointer from the array of acive
612  * sglq's. The xritag that is passed in is used to index into the
613  * array. Before the xritag can be used it needs to be adjusted
614  * by subtracting the xribase.
615  *
616  * Returns sglq ponter = success, NULL = Failure.
617  **/
618 struct lpfc_sglq *
619 __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
620 {
621 	struct lpfc_sglq *sglq;
622 
623 	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
624 	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
625 	return sglq;
626 }
627 
628 /**
629  * __lpfc_get_active_sglq - Get the active sglq for this XRI.
630  * @phba: Pointer to HBA context object.
631  * @xritag: XRI value.
632  *
633  * This function returns the sglq pointer from the array of acive
634  * sglq's. The xritag that is passed in is used to index into the
635  * array. Before the xritag can be used it needs to be adjusted
636  * by subtracting the xribase.
637  *
638  * Returns sglq ponter = success, NULL = Failure.
639  **/
640 struct lpfc_sglq *
641 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
642 {
643 	struct lpfc_sglq *sglq;
644 
645 	sglq =  phba->sli4_hba.lpfc_sglq_active_list[xritag];
646 	return sglq;
647 }
648 
649 /**
650  * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
651  * @phba: Pointer to HBA context object.
652  * @xritag: xri used in this exchange.
653  * @rrq: The RRQ to be cleared.
654  *
655  **/
656 void
657 lpfc_clr_rrq_active(struct lpfc_hba *phba,
658 		    uint16_t xritag,
659 		    struct lpfc_node_rrq *rrq)
660 {
661 	struct lpfc_nodelist *ndlp = NULL;
662 
663 	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
664 		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);
665 
666 	/* The target DID could have been swapped (cable swap)
667 	 * we should use the ndlp from the findnode if it is
668 	 * available.
669 	 */
670 	if ((!ndlp) && rrq->ndlp)
671 		ndlp = rrq->ndlp;
672 
673 	if (!ndlp)
674 		goto out;
675 
676 	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
677 		rrq->send_rrq = 0;
678 		rrq->xritag = 0;
679 		rrq->rrq_stop_time = 0;
680 	}
681 out:
682 	mempool_free(rrq, phba->rrq_pool);
683 }
684 
685 /**
686  * lpfc_handle_rrq_active - Checks if RRQ has waithed RATOV.
687  * @phba: Pointer to HBA context object.
688  *
689  * This function is called with hbalock held. This function
690  * Checks if stop_time (ratov from setting rrq active) has
691  * been reached, if it has and the send_rrq flag is set then
692  * it will call lpfc_send_rrq. If the send_rrq flag is not set
693  * then it will just call the routine to clear the rrq and
694  * free the rrq resource.
695  * The timer is set to the next rrq that is going to expire before
696  * leaving the routine.
697  *
698  **/
699 void
700 lpfc_handle_rrq_active(struct lpfc_hba *phba)
701 {
702 	struct lpfc_node_rrq *rrq;
703 	struct lpfc_node_rrq *nextrrq;
704 	unsigned long next_time;
705 	unsigned long iflags;
706 	LIST_HEAD(send_rrq);
707 
708 	spin_lock_irqsave(&phba->hbalock, iflags);
709 	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
710 	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
711 	list_for_each_entry_safe(rrq, nextrrq,
712 				 &phba->active_rrq_list, list) {
713 		if (time_after(jiffies, rrq->rrq_stop_time))
714 			list_move(&rrq->list, &send_rrq);
715 		else if (time_before(rrq->rrq_stop_time, next_time))
716 			next_time = rrq->rrq_stop_time;
717 	}
718 	spin_unlock_irqrestore(&phba->hbalock, iflags);
719 	if ((!list_empty(&phba->active_rrq_list)) &&
720 	    (!(phba->pport->load_flag & FC_UNLOADING)))
721 		mod_timer(&phba->rrq_tmr, next_time);
722 	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
723 		list_del(&rrq->list);
724 		if (!rrq->send_rrq)
725 			/* this call will free the rrq */
726 		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
727 		else if (lpfc_send_rrq(phba, rrq)) {
728 			/* if we send the rrq then the completion handler
729 			*  will clear the bit in the xribitmap.
730 			*/
731 			lpfc_clr_rrq_active(phba, rrq->xritag,
732 					    rrq);
733 		}
734 	}
735 }
736 
737 /**
738  * lpfc_get_active_rrq - Get the active RRQ for this exchange.
739  * @vport: Pointer to vport context object.
740  * @xri: The xri used in the exchange.
741  * @did: The targets DID for this exchange.
742  *
743  * returns NULL = rrq not found in the phba->active_rrq_list.
744  *         rrq = rrq for this xri and target.
745  **/
746 struct lpfc_node_rrq *
747 lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
748 {
749 	struct lpfc_hba *phba = vport->phba;
750 	struct lpfc_node_rrq *rrq;
751 	struct lpfc_node_rrq *nextrrq;
752 	unsigned long iflags;
753 
754 	if (phba->sli_rev != LPFC_SLI_REV4)
755 		return NULL;
756 	spin_lock_irqsave(&phba->hbalock, iflags);
757 	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
758 		if (rrq->vport == vport && rrq->xritag == xri &&
759 				rrq->nlp_DID == did){
760 			list_del(&rrq->list);
761 			spin_unlock_irqrestore(&phba->hbalock, iflags);
762 			return rrq;
763 		}
764 	}
765 	spin_unlock_irqrestore(&phba->hbalock, iflags);
766 	return NULL;
767 }
768 
769 /**
770  * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
771  * @vport: Pointer to vport context object.
772  * @ndlp: Pointer to the lpfc_node_list structure.
773  * If ndlp is NULL Remove all active RRQs for this vport from the
774  * phba->active_rrq_list and clear the rrq.
775  * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
776  **/
777 void
778 lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
779 
780 {
781 	struct lpfc_hba *phba = vport->phba;
782 	struct lpfc_node_rrq *rrq;
783 	struct lpfc_node_rrq *nextrrq;
784 	unsigned long iflags;
785 	LIST_HEAD(rrq_list);
786 
787 	if (phba->sli_rev != LPFC_SLI_REV4)
788 		return;
789 	if (!ndlp) {
790 		lpfc_sli4_vport_delete_els_xri_aborted(vport);
791 		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
792 	}
793 	spin_lock_irqsave(&phba->hbalock, iflags);
794 	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
795 		if ((rrq->vport == vport) && (!ndlp  || rrq->ndlp == ndlp))
796 			list_move(&rrq->list, &rrq_list);
797 	spin_unlock_irqrestore(&phba->hbalock, iflags);
798 
799 	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
800 		list_del(&rrq->list);
801 		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
802 	}
803 }
804 
805 /**
806  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
807  * @phba: Pointer to HBA context object.
808  * @ndlp: Targets nodelist pointer for this exchange.
809  * @xritag the xri in the bitmap to test.
810  *
811  * This function is called with hbalock held. This function
812  * returns 0 = rrq not active for this xri
813  *         1 = rrq is valid for this xri.
814  **/
815 int
816 lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
817 			uint16_t  xritag)
818 {
819 	lockdep_assert_held(&phba->hbalock);
820 	if (!ndlp)
821 		return 0;
822 	if (!ndlp->active_rrqs_xri_bitmap)
823 		return 0;
824 	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
825 			return 1;
826 	else
827 		return 0;
828 }
829 
830 /**
831  * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
832  * @phba: Pointer to HBA context object.
833  * @ndlp: nodelist pointer for this target.
834  * @xritag: xri used in this exchange.
835  * @rxid: Remote Exchange ID.
836  * @send_rrq: Flag used to determine if we should send rrq els cmd.
837  *
838  * This function takes the hbalock.
839  * The active bit is always set in the active rrq xri_bitmap even
840  * if there is no slot avaiable for the other rrq information.
841  *
842  * returns 0 rrq actived for this xri
843  *         < 0 No memory or invalid ndlp.
844  **/
845 int
846 lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
847 		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
848 {
849 	unsigned long iflags;
850 	struct lpfc_node_rrq *rrq;
851 	int empty;
852 
853 	if (!ndlp)
854 		return -EINVAL;
855 
856 	if (!phba->cfg_enable_rrq)
857 		return -EINVAL;
858 
859 	spin_lock_irqsave(&phba->hbalock, iflags);
860 	if (phba->pport->load_flag & FC_UNLOADING) {
861 		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
862 		goto out;
863 	}
864 
865 	/*
866 	 * set the active bit even if there is no mem available.
867 	 */
868 	if (NLP_CHK_FREE_REQ(ndlp))
869 		goto out;
870 
871 	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
872 		goto out;
873 
874 	if (!ndlp->active_rrqs_xri_bitmap)
875 		goto out;
876 
877 	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
878 		goto out;
879 
880 	spin_unlock_irqrestore(&phba->hbalock, iflags);
881 	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
882 	if (!rrq) {
883 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
884 				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
885 				" DID:0x%x Send:%d\n",
886 				xritag, rxid, ndlp->nlp_DID, send_rrq);
887 		return -EINVAL;
888 	}
889 	if (phba->cfg_enable_rrq == 1)
890 		rrq->send_rrq = send_rrq;
891 	else
892 		rrq->send_rrq = 0;
893 	rrq->xritag = xritag;
894 	rrq->rrq_stop_time = jiffies +
895 				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
896 	rrq->ndlp = ndlp;
897 	rrq->nlp_DID = ndlp->nlp_DID;
898 	rrq->vport = ndlp->vport;
899 	rrq->rxid = rxid;
900 	spin_lock_irqsave(&phba->hbalock, iflags);
901 	empty = list_empty(&phba->active_rrq_list);
902 	list_add_tail(&rrq->list, &phba->active_rrq_list);
903 	phba->hba_flag |= HBA_RRQ_ACTIVE;
904 	if (empty)
905 		lpfc_worker_wake_up(phba);
906 	spin_unlock_irqrestore(&phba->hbalock, iflags);
907 	return 0;
908 out:
909 	spin_unlock_irqrestore(&phba->hbalock, iflags);
910 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
911 			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
912 			" DID:0x%x Send:%d\n",
913 			xritag, rxid, ndlp->nlp_DID, send_rrq);
914 	return -EINVAL;
915 }
916 
917 /**
918  * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
919  * @phba: Pointer to HBA context object.
920  * @piocb: Pointer to the iocbq.
921  *
922  * This function is called with the ring lock held. This function
923  * gets a new driver sglq object from the sglq list. If the
924  * list is not empty then it is successful, it returns pointer to the newly
925  * allocated sglq object else it returns NULL.
926  **/
927 static struct lpfc_sglq *
928 __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
929 {
930 	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
931 	struct lpfc_sglq *sglq = NULL;
932 	struct lpfc_sglq *start_sglq = NULL;
933 	struct lpfc_scsi_buf *lpfc_cmd;
934 	struct lpfc_nodelist *ndlp;
935 	int found = 0;
936 
937 	lockdep_assert_held(&phba->hbalock);
938 
939 	if (piocbq->iocb_flag &  LPFC_IO_FCP) {
940 		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
941 		ndlp = lpfc_cmd->rdata->pnode;
942 	} else  if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
943 			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
944 		ndlp = piocbq->context_un.ndlp;
945 	} else  if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
946 		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
947 			ndlp = NULL;
948 		else
949 			ndlp = piocbq->context_un.ndlp;
950 	} else {
951 		ndlp = piocbq->context1;
952 	}
953 
954 	spin_lock(&phba->sli4_hba.sgl_list_lock);
955 	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
956 	start_sglq = sglq;
957 	while (!found) {
958 		if (!sglq)
959 			break;
960 		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
961 		    test_bit(sglq->sli4_lxritag,
962 		    ndlp->active_rrqs_xri_bitmap)) {
963 			/* This xri has an rrq outstanding for this DID.
964 			 * put it back in the list and get another xri.
965 			 */
966 			list_add_tail(&sglq->list, lpfc_els_sgl_list);
967 			sglq = NULL;
968 			list_remove_head(lpfc_els_sgl_list, sglq,
969 						struct lpfc_sglq, list);
970 			if (sglq == start_sglq) {
971 				list_add_tail(&sglq->list, lpfc_els_sgl_list);
972 				sglq = NULL;
973 				break;
974 			} else
975 				continue;
976 		}
977 		sglq->ndlp = ndlp;
978 		found = 1;
979 		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
980 		sglq->state = SGL_ALLOCATED;
981 	}
982 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
983 	return sglq;
984 }
985 
986 /**
987  * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
988  * @phba: Pointer to HBA context object.
989  * @piocb: Pointer to the iocbq.
990  *
991  * This function is called with the sgl_list lock held. This function
992  * gets a new driver sglq object from the sglq list. If the
993  * list is not empty then it is successful, it returns pointer to the newly
994  * allocated sglq object else it returns NULL.
995  **/
996 struct lpfc_sglq *
997 __lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
998 {
999 	struct list_head *lpfc_nvmet_sgl_list;
1000 	struct lpfc_sglq *sglq = NULL;
1001 
1002 	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;
1003 
1004 	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);
1005 
1006 	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
1007 	if (!sglq)
1008 		return NULL;
1009 	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
1010 	sglq->state = SGL_ALLOCATED;
1011 	return sglq;
1012 }
1013 
1014 /**
1015  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
1016  * @phba: Pointer to HBA context object.
1017  *
1018  * This function is called with no lock held. This function
1019  * allocates a new driver iocb object from the iocb pool. If the
1020  * allocation is successful, it returns pointer to the newly
1021  * allocated iocb object else it returns NULL.
1022  **/
1023 struct lpfc_iocbq *
1024 lpfc_sli_get_iocbq(struct lpfc_hba *phba)
1025 {
1026 	struct lpfc_iocbq * iocbq = NULL;
1027 	unsigned long iflags;
1028 
1029 	spin_lock_irqsave(&phba->hbalock, iflags);
1030 	iocbq = __lpfc_sli_get_iocbq(phba);
1031 	spin_unlock_irqrestore(&phba->hbalock, iflags);
1032 	return iocbq;
1033 }
1034 
1035 /**
1036  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1037  * @phba: Pointer to HBA context object.
1038  * @iocbq: Pointer to driver iocb object.
1039  *
1040  * This function is called with hbalock held to release driver
1041  * iocb object to the iocb pool. The iotag in the iocb object
1042  * does not change for each use of the iocb object. This function
1043  * clears all other fields of the iocb object when it is freed.
1044  * The sqlq structure that holds the xritag and phys and virtual
1045  * mappings for the scatter gather list is retrieved from the
1046  * active array of sglq. The get of the sglq pointer also clears
1047  * the entry in the array. If the status of the IO indiactes that
1048  * this IO was aborted then the sglq entry it put on the
1049  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1050  * IO has good status or fails for any other reason then the sglq
1051  * entry is added to the free list (lpfc_els_sgl_list).
1052  **/
1053 static void
1054 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1055 {
1056 	struct lpfc_sglq *sglq;
1057 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1058 	unsigned long iflag = 0;
1059 	struct lpfc_sli_ring *pring;
1060 
1061 	lockdep_assert_held(&phba->hbalock);
1062 
1063 	if (iocbq->sli4_xritag == NO_XRI)
1064 		sglq = NULL;
1065 	else
1066 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1067 
1068 
1069 	if (sglq)  {
1070 		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
1071 			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1072 					  iflag);
1073 			sglq->state = SGL_FREED;
1074 			sglq->ndlp = NULL;
1075 			list_add_tail(&sglq->list,
1076 				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
1077 			spin_unlock_irqrestore(
1078 				&phba->sli4_hba.sgl_list_lock, iflag);
1079 			goto out;
1080 		}
1081 
1082 		pring = phba->sli4_hba.els_wq->pring;
1083 		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
1084 			(sglq->state != SGL_XRI_ABORTED)) {
1085 			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1086 					  iflag);
1087 			list_add(&sglq->list,
1088 				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1089 			spin_unlock_irqrestore(
1090 				&phba->sli4_hba.sgl_list_lock, iflag);
1091 		} else {
1092 			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1093 					  iflag);
1094 			sglq->state = SGL_FREED;
1095 			sglq->ndlp = NULL;
1096 			list_add_tail(&sglq->list,
1097 				      &phba->sli4_hba.lpfc_els_sgl_list);
1098 			spin_unlock_irqrestore(
1099 				&phba->sli4_hba.sgl_list_lock, iflag);
1100 
1101 			/* Check if TXQ queue needs to be serviced */
1102 			if (!list_empty(&pring->txq))
1103 				lpfc_worker_wake_up(phba);
1104 		}
1105 	}
1106 
1107 out:
1108 	/*
1109 	 * Clean all volatile data fields, preserve iotag and node struct.
1110 	 */
1111 	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1112 	iocbq->sli4_lxritag = NO_XRI;
1113 	iocbq->sli4_xritag = NO_XRI;
1114 	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
1115 			      LPFC_IO_NVME_LS);
1116 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1117 }
1118 
1119 
1120 /**
1121  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1122  * @phba: Pointer to HBA context object.
1123  * @iocbq: Pointer to driver iocb object.
1124  *
1125  * This function is called with hbalock held to release driver
1126  * iocb object to the iocb pool. The iotag in the iocb object
1127  * does not change for each use of the iocb object. This function
1128  * clears all other fields of the iocb object when it is freed.
1129  **/
1130 static void
1131 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1132 {
1133 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
1134 
1135 	lockdep_assert_held(&phba->hbalock);
1136 
1137 	/*
1138 	 * Clean all volatile data fields, preserve iotag and node struct.
1139 	 */
1140 	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
1141 	iocbq->sli4_xritag = NO_XRI;
1142 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1143 }
1144 
1145 /**
1146  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1147  * @phba: Pointer to HBA context object.
1148  * @iocbq: Pointer to driver iocb object.
1149  *
1150  * This function is called with hbalock held to release driver
1151  * iocb object to the iocb pool. The iotag in the iocb object
1152  * does not change for each use of the iocb object. This function
1153  * clears all other fields of the iocb object when it is freed.
1154  **/
1155 static void
1156 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1157 {
1158 	lockdep_assert_held(&phba->hbalock);
1159 
1160 	phba->__lpfc_sli_release_iocbq(phba, iocbq);
1161 	phba->iocb_cnt--;
1162 }
1163 
1164 /**
1165  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1166  * @phba: Pointer to HBA context object.
1167  * @iocbq: Pointer to driver iocb object.
1168  *
1169  * This function is called with no lock held to release the iocb to
1170  * iocb pool.
1171  **/
1172 void
1173 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1174 {
1175 	unsigned long iflags;
1176 
1177 	/*
1178 	 * Clean all volatile data fields, preserve iotag and node struct.
1179 	 */
1180 	spin_lock_irqsave(&phba->hbalock, iflags);
1181 	__lpfc_sli_release_iocbq(phba, iocbq);
1182 	spin_unlock_irqrestore(&phba->hbalock, iflags);
1183 }
1184 
1185 /**
1186  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1187  * @phba: Pointer to HBA context object.
1188  * @iocblist: List of IOCBs.
1189  * @ulpstatus: ULP status in IOCB command field.
1190  * @ulpWord4: ULP word-4 in IOCB command field.
1191  *
1192  * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1193  * on the list by invoking the complete callback function associated with the
1194  * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1195  * fields.
1196  **/
1197 void
1198 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1199 		      uint32_t ulpstatus, uint32_t ulpWord4)
1200 {
1201 	struct lpfc_iocbq *piocb;
1202 
1203 	while (!list_empty(iocblist)) {
1204 		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1205 		if (!piocb->iocb_cmpl)
1206 			lpfc_sli_release_iocbq(phba, piocb);
1207 		else {
1208 			piocb->iocb.ulpStatus = ulpstatus;
1209 			piocb->iocb.un.ulpWord[4] = ulpWord4;
1210 			(piocb->iocb_cmpl) (phba, piocb, piocb);
1211 		}
1212 	}
1213 	return;
1214 }
1215 
1216 /**
1217  * lpfc_sli_iocb_cmd_type - Get the iocb type
1218  * @iocb_cmnd: iocb command code.
1219  *
1220  * This function is called by ring event handler function to get the iocb type.
1221  * This function translates the iocb command to an iocb command type used to
1222  * decide the final disposition of each completed IOCB.
1223  * The function returns
1224  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1225  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1226  * LPFC_ABORT_IOCB   if it is an abort iocb
1227  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1228  *
1229  * The caller is not required to hold any lock.
1230  **/
1231 static lpfc_iocb_type
1232 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1233 {
1234 	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1235 
1236 	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1237 		return 0;
1238 
1239 	switch (iocb_cmnd) {
1240 	case CMD_XMIT_SEQUENCE_CR:
1241 	case CMD_XMIT_SEQUENCE_CX:
1242 	case CMD_XMIT_BCAST_CN:
1243 	case CMD_XMIT_BCAST_CX:
1244 	case CMD_ELS_REQUEST_CR:
1245 	case CMD_ELS_REQUEST_CX:
1246 	case CMD_CREATE_XRI_CR:
1247 	case CMD_CREATE_XRI_CX:
1248 	case CMD_GET_RPI_CN:
1249 	case CMD_XMIT_ELS_RSP_CX:
1250 	case CMD_GET_RPI_CR:
1251 	case CMD_FCP_IWRITE_CR:
1252 	case CMD_FCP_IWRITE_CX:
1253 	case CMD_FCP_IREAD_CR:
1254 	case CMD_FCP_IREAD_CX:
1255 	case CMD_FCP_ICMND_CR:
1256 	case CMD_FCP_ICMND_CX:
1257 	case CMD_FCP_TSEND_CX:
1258 	case CMD_FCP_TRSP_CX:
1259 	case CMD_FCP_TRECEIVE_CX:
1260 	case CMD_FCP_AUTO_TRSP_CX:
1261 	case CMD_ADAPTER_MSG:
1262 	case CMD_ADAPTER_DUMP:
1263 	case CMD_XMIT_SEQUENCE64_CR:
1264 	case CMD_XMIT_SEQUENCE64_CX:
1265 	case CMD_XMIT_BCAST64_CN:
1266 	case CMD_XMIT_BCAST64_CX:
1267 	case CMD_ELS_REQUEST64_CR:
1268 	case CMD_ELS_REQUEST64_CX:
1269 	case CMD_FCP_IWRITE64_CR:
1270 	case CMD_FCP_IWRITE64_CX:
1271 	case CMD_FCP_IREAD64_CR:
1272 	case CMD_FCP_IREAD64_CX:
1273 	case CMD_FCP_ICMND64_CR:
1274 	case CMD_FCP_ICMND64_CX:
1275 	case CMD_FCP_TSEND64_CX:
1276 	case CMD_FCP_TRSP64_CX:
1277 	case CMD_FCP_TRECEIVE64_CX:
1278 	case CMD_GEN_REQUEST64_CR:
1279 	case CMD_GEN_REQUEST64_CX:
1280 	case CMD_XMIT_ELS_RSP64_CX:
1281 	case DSSCMD_IWRITE64_CR:
1282 	case DSSCMD_IWRITE64_CX:
1283 	case DSSCMD_IREAD64_CR:
1284 	case DSSCMD_IREAD64_CX:
1285 		type = LPFC_SOL_IOCB;
1286 		break;
1287 	case CMD_ABORT_XRI_CN:
1288 	case CMD_ABORT_XRI_CX:
1289 	case CMD_CLOSE_XRI_CN:
1290 	case CMD_CLOSE_XRI_CX:
1291 	case CMD_XRI_ABORTED_CX:
1292 	case CMD_ABORT_MXRI64_CN:
1293 	case CMD_XMIT_BLS_RSP64_CX:
1294 		type = LPFC_ABORT_IOCB;
1295 		break;
1296 	case CMD_RCV_SEQUENCE_CX:
1297 	case CMD_RCV_ELS_REQ_CX:
1298 	case CMD_RCV_SEQUENCE64_CX:
1299 	case CMD_RCV_ELS_REQ64_CX:
1300 	case CMD_ASYNC_STATUS:
1301 	case CMD_IOCB_RCV_SEQ64_CX:
1302 	case CMD_IOCB_RCV_ELS64_CX:
1303 	case CMD_IOCB_RCV_CONT64_CX:
1304 	case CMD_IOCB_RET_XRI64_CX:
1305 		type = LPFC_UNSOL_IOCB;
1306 		break;
1307 	case CMD_IOCB_XMIT_MSEQ64_CR:
1308 	case CMD_IOCB_XMIT_MSEQ64_CX:
1309 	case CMD_IOCB_RCV_SEQ_LIST64_CX:
1310 	case CMD_IOCB_RCV_ELS_LIST64_CX:
1311 	case CMD_IOCB_CLOSE_EXTENDED_CN:
1312 	case CMD_IOCB_ABORT_EXTENDED_CN:
1313 	case CMD_IOCB_RET_HBQE64_CN:
1314 	case CMD_IOCB_FCP_IBIDIR64_CR:
1315 	case CMD_IOCB_FCP_IBIDIR64_CX:
1316 	case CMD_IOCB_FCP_ITASKMGT64_CX:
1317 	case CMD_IOCB_LOGENTRY_CN:
1318 	case CMD_IOCB_LOGENTRY_ASYNC_CN:
1319 		printk("%s - Unhandled SLI-3 Command x%x\n",
1320 				__func__, iocb_cmnd);
1321 		type = LPFC_UNKNOWN_IOCB;
1322 		break;
1323 	default:
1324 		type = LPFC_UNKNOWN_IOCB;
1325 		break;
1326 	}
1327 
1328 	return type;
1329 }
1330 
1331 /**
1332  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1333  * @phba: Pointer to HBA context object.
1334  *
1335  * This function is called from SLI initialization code
1336  * to configure every ring of the HBA's SLI interface. The
1337  * caller is not required to hold any lock. This function issues
1338  * a config_ring mailbox command for each ring.
1339  * This function returns zero if successful else returns a negative
1340  * error code.
1341  **/
1342 static int
1343 lpfc_sli_ring_map(struct lpfc_hba *phba)
1344 {
1345 	struct lpfc_sli *psli = &phba->sli;
1346 	LPFC_MBOXQ_t *pmb;
1347 	MAILBOX_t *pmbox;
1348 	int i, rc, ret = 0;
1349 
1350 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1351 	if (!pmb)
1352 		return -ENOMEM;
1353 	pmbox = &pmb->u.mb;
1354 	phba->link_state = LPFC_INIT_MBX_CMDS;
1355 	for (i = 0; i < psli->num_rings; i++) {
1356 		lpfc_config_ring(phba, i, pmb);
1357 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1358 		if (rc != MBX_SUCCESS) {
1359 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1360 					"0446 Adapter failed to init (%d), "
1361 					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
1362 					"ring %d\n",
1363 					rc, pmbox->mbxCommand,
1364 					pmbox->mbxStatus, i);
1365 			phba->link_state = LPFC_HBA_ERROR;
1366 			ret = -ENXIO;
1367 			break;
1368 		}
1369 	}
1370 	mempool_free(pmb, phba->mbox_mem_pool);
1371 	return ret;
1372 }
1373 
1374 /**
1375  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1376  * @phba: Pointer to HBA context object.
1377  * @pring: Pointer to driver SLI ring object.
1378  * @piocb: Pointer to the driver iocb object.
1379  *
1380  * This function is called with hbalock held. The function adds the
1381  * new iocb to txcmplq of the given ring. This function always returns
1382  * 0. If this function is called for ELS ring, this function checks if
1383  * there is a vport associated with the ELS command. This function also
1384  * starts els_tmofunc timer if this is an ELS command.
1385  **/
1386 static int
1387 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1388 			struct lpfc_iocbq *piocb)
1389 {
1390 	lockdep_assert_held(&phba->hbalock);
1391 
1392 	BUG_ON(!piocb);
1393 
1394 	list_add_tail(&piocb->list, &pring->txcmplq);
1395 	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
1396 
1397 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1398 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
1399 	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
1400 		BUG_ON(!piocb->vport);
1401 		if (!(piocb->vport->load_flag & FC_UNLOADING))
1402 			mod_timer(&piocb->vport->els_tmofunc,
1403 				  jiffies +
1404 				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1405 	}
1406 
1407 	return 0;
1408 }
1409 
1410 /**
1411  * lpfc_sli_ringtx_get - Get first element of the txq
1412  * @phba: Pointer to HBA context object.
1413  * @pring: Pointer to driver SLI ring object.
1414  *
1415  * This function is called with hbalock held to get next
1416  * iocb in txq of the given ring. If there is any iocb in
1417  * the txq, the function returns first iocb in the list after
1418  * removing the iocb from the list, else it returns NULL.
1419  **/
1420 struct lpfc_iocbq *
1421 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1422 {
1423 	struct lpfc_iocbq *cmd_iocb;
1424 
1425 	lockdep_assert_held(&phba->hbalock);
1426 
1427 	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1428 	return cmd_iocb;
1429 }
1430 
1431 /**
1432  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
1433  * @phba: Pointer to HBA context object.
1434  * @pring: Pointer to driver SLI ring object.
1435  *
1436  * This function is called with hbalock held and the caller must post the
1437  * iocb without releasing the lock. If the caller releases the lock,
1438  * iocb slot returned by the function is not guaranteed to be available.
1439  * The function returns pointer to the next available iocb slot if there
1440  * is available slot in the ring, else it returns NULL.
1441  * If the get index of the ring is ahead of the put index, the function
1442  * will post an error attention event to the worker thread to take the
1443  * HBA to offline state.
1444  **/
1445 static IOCB_t *
1446 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1447 {
1448 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
1449 	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
1450 
1451 	lockdep_assert_held(&phba->hbalock);
1452 
1453 	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
1454 	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
1455 		pring->sli.sli3.next_cmdidx = 0;
1456 
1457 	if (unlikely(pring->sli.sli3.local_getidx ==
1458 		pring->sli.sli3.next_cmdidx)) {
1459 
1460 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
1461 
1462 		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
1463 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1464 					"0315 Ring %d issue: portCmdGet %d "
1465 					"is bigger than cmd ring %d\n",
1466 					pring->ringno,
1467 					pring->sli.sli3.local_getidx,
1468 					max_cmd_idx);
1469 
1470 			phba->link_state = LPFC_HBA_ERROR;
1471 			/*
1472 			 * All error attention handlers are posted to
1473 			 * worker thread
1474 			 */
1475 			phba->work_ha |= HA_ERATT;
1476 			phba->work_hs = HS_FFER3;
1477 
1478 			lpfc_worker_wake_up(phba);
1479 
1480 			return NULL;
1481 		}
1482 
1483 		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
1484 			return NULL;
1485 	}
1486 
1487 	return lpfc_cmd_iocb(phba, pring);
1488 }
1489 
1490 /**
1491  * lpfc_sli_next_iotag - Get an iotag for the iocb
1492  * @phba: Pointer to HBA context object.
1493  * @iocbq: Pointer to driver iocb object.
1494  *
1495  * This function gets an iotag for the iocb. If there is no unused iotag and
1496  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
1497  * array and assigns a new iotag.
1498  * The function returns the allocated iotag if successful, else returns zero.
1499  * Zero is not a valid iotag.
1500  * The caller is not required to hold any lock.
1501  **/
1502 uint16_t
1503 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1504 {
1505 	struct lpfc_iocbq **new_arr;
1506 	struct lpfc_iocbq **old_arr;
1507 	size_t new_len;
1508 	struct lpfc_sli *psli = &phba->sli;
1509 	uint16_t iotag;
1510 
1511 	spin_lock_irq(&phba->hbalock);
1512 	iotag = psli->last_iotag;
1513 	if(++iotag < psli->iocbq_lookup_len) {
1514 		psli->last_iotag = iotag;
1515 		psli->iocbq_lookup[iotag] = iocbq;
1516 		spin_unlock_irq(&phba->hbalock);
1517 		iocbq->iotag = iotag;
1518 		return iotag;
1519 	} else if (psli->iocbq_lookup_len < (0xffff
1520 					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
1521 		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
1522 		spin_unlock_irq(&phba->hbalock);
1523 		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
1524 				  GFP_KERNEL);
1525 		if (new_arr) {
1526 			spin_lock_irq(&phba->hbalock);
1527 			old_arr = psli->iocbq_lookup;
1528 			if (new_len <= psli->iocbq_lookup_len) {
1529 				/* highly unprobable case */
1530 				kfree(new_arr);
1531 				iotag = psli->last_iotag;
1532 				if(++iotag < psli->iocbq_lookup_len) {
1533 					psli->last_iotag = iotag;
1534 					psli->iocbq_lookup[iotag] = iocbq;
1535 					spin_unlock_irq(&phba->hbalock);
1536 					iocbq->iotag = iotag;
1537 					return iotag;
1538 				}
1539 				spin_unlock_irq(&phba->hbalock);
1540 				return 0;
1541 			}
1542 			if (psli->iocbq_lookup)
1543 				memcpy(new_arr, old_arr,
1544 				       ((psli->last_iotag  + 1) *
1545 					sizeof (struct lpfc_iocbq *)));
1546 			psli->iocbq_lookup = new_arr;
1547 			psli->iocbq_lookup_len = new_len;
1548 			psli->last_iotag = iotag;
1549 			psli->iocbq_lookup[iotag] = iocbq;
1550 			spin_unlock_irq(&phba->hbalock);
1551 			iocbq->iotag = iotag;
1552 			kfree(old_arr);
1553 			return iotag;
1554 		}
1555 	} else
1556 		spin_unlock_irq(&phba->hbalock);
1557 
1558 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1559 			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
1560 			psli->last_iotag);
1561 
1562 	return 0;
1563 }
1564 
1565 /**
1566  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
1567  * @phba: Pointer to HBA context object.
1568  * @pring: Pointer to driver SLI ring object.
1569  * @iocb: Pointer to iocb slot in the ring.
1570  * @nextiocb: Pointer to driver iocb object which need to be
1571  *            posted to firmware.
1572  *
1573  * This function is called with hbalock held to post a new iocb to
1574  * the firmware. This function copies the new iocb to ring iocb slot and
1575  * updates the ring pointers. It adds the new iocb to txcmplq if there is
1576  * a completion call back for this iocb else the function will free the
1577  * iocb object.
1578  **/
1579 static void
1580 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1581 		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
1582 {
1583 	lockdep_assert_held(&phba->hbalock);
1584 	/*
1585 	 * Set up an iotag
1586 	 */
1587 	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
1588 
1589 
1590 	if (pring->ringno == LPFC_ELS_RING) {
1591 		lpfc_debugfs_slow_ring_trc(phba,
1592 			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
1593 			*(((uint32_t *) &nextiocb->iocb) + 4),
1594 			*(((uint32_t *) &nextiocb->iocb) + 6),
1595 			*(((uint32_t *) &nextiocb->iocb) + 7));
1596 	}
1597 
1598 	/*
1599 	 * Issue iocb command to adapter
1600 	 */
1601 	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
1602 	wmb();
1603 	pring->stats.iocb_cmd++;
1604 
1605 	/*
1606 	 * If there is no completion routine to call, we can release the
1607 	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1608 	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1609 	 */
1610 	if (nextiocb->iocb_cmpl)
1611 		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
1612 	else
1613 		__lpfc_sli_release_iocbq(phba, nextiocb);
1614 
1615 	/*
1616 	 * Let the HBA know what IOCB slot will be the next one the
1617 	 * driver will put a command into.
1618 	 */
1619 	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
1620 	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
1621 }
1622 
1623 /**
1624  * lpfc_sli_update_full_ring - Update the chip attention register
1625  * @phba: Pointer to HBA context object.
1626  * @pring: Pointer to driver SLI ring object.
1627  *
1628  * The caller is not required to hold any lock for calling this function.
1629  * This function updates the chip attention bits for the ring to inform firmware
1630  * that there are pending work to be done for this ring and requests an
1631  * interrupt when there is space available in the ring. This function is
1632  * called when the driver is unable to post more iocbs to the ring due
1633  * to unavailability of space in the ring.
1634  **/
1635 static void
1636 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1637 {
1638 	int ringno = pring->ringno;
1639 
1640 	pring->flag |= LPFC_CALL_RING_AVAILABLE;
1641 
1642 	wmb();
1643 
1644 	/*
1645 	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
1646 	 * The HBA will tell us when an IOCB entry is available.
1647 	 */
1648 	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
1649 	readl(phba->CAregaddr); /* flush */
1650 
1651 	pring->stats.iocb_cmd_full++;
1652 }
1653 
1654 /**
1655  * lpfc_sli_update_ring - Update chip attention register
1656  * @phba: Pointer to HBA context object.
1657  * @pring: Pointer to driver SLI ring object.
1658  *
1659  * This function updates the chip attention register bit for the
1660  * given ring to inform HBA that there is more work to be done
1661  * in this ring. The caller is not required to hold any lock.
1662  **/
1663 static void
1664 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1665 {
1666 	int ringno = pring->ringno;
1667 
1668 	/*
1669 	 * Tell the HBA that there is work to do in this ring.
1670 	 */
1671 	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
1672 		wmb();
1673 		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
1674 		readl(phba->CAregaddr); /* flush */
1675 	}
1676 }
1677 
1678 /**
1679  * lpfc_sli_resume_iocb - Process iocbs in the txq
1680  * @phba: Pointer to HBA context object.
1681  * @pring: Pointer to driver SLI ring object.
1682  *
1683  * This function is called with hbalock held to post pending iocbs
1684  * in the txq to the firmware. This function is called when driver
1685  * detects space available in the ring.
1686  **/
1687 static void
1688 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1689 {
1690 	IOCB_t *iocb;
1691 	struct lpfc_iocbq *nextiocb;
1692 
1693 	lockdep_assert_held(&phba->hbalock);
1694 
1695 	/*
1696 	 * Check to see if:
1697 	 *  (a) there is anything on the txq to send
1698 	 *  (b) link is up
1699 	 *  (c) link attention events can be processed (fcp ring only)
1700 	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
1701 	 */
1702 
1703 	if (lpfc_is_link_up(phba) &&
1704 	    (!list_empty(&pring->txq)) &&
1705 	    (pring->ringno != LPFC_FCP_RING ||
1706 	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
1707 
1708 		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
1709 		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
1710 			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
1711 
1712 		if (iocb)
1713 			lpfc_sli_update_ring(phba, pring);
1714 		else
1715 			lpfc_sli_update_full_ring(phba, pring);
1716 	}
1717 
1718 	return;
1719 }
1720 
1721 /**
1722  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
1723  * @phba: Pointer to HBA context object.
1724  * @hbqno: HBQ number.
1725  *
1726  * This function is called with hbalock held to get the next
1727  * available slot for the given HBQ. If there is free slot
1728  * available for the HBQ it will return pointer to the next available
1729  * HBQ entry else it will return NULL.
1730  **/
1731 static struct lpfc_hbq_entry *
1732 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
1733 {
1734 	struct hbq_s *hbqp = &phba->hbqs[hbqno];
1735 
1736 	lockdep_assert_held(&phba->hbalock);
1737 
1738 	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
1739 	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
1740 		hbqp->next_hbqPutIdx = 0;
1741 
1742 	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
1743 		uint32_t raw_index = phba->hbq_get[hbqno];
1744 		uint32_t getidx = le32_to_cpu(raw_index);
1745 
1746 		hbqp->local_hbqGetIdx = getidx;
1747 
1748 		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
1749 			lpfc_printf_log(phba, KERN_ERR,
1750 					LOG_SLI | LOG_VPORT,
1751 					"1802 HBQ %d: local_hbqGetIdx "
1752 					"%u is > than hbqp->entry_count %u\n",
1753 					hbqno, hbqp->local_hbqGetIdx,
1754 					hbqp->entry_count);
1755 
1756 			phba->link_state = LPFC_HBA_ERROR;
1757 			return NULL;
1758 		}
1759 
1760 		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
1761 			return NULL;
1762 	}
1763 
1764 	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
1765 			hbqp->hbqPutIdx;
1766 }
1767 
1768 /**
1769  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
1770  * @phba: Pointer to HBA context object.
1771  *
1772  * This function is called with no lock held to free all the
1773  * hbq buffers while uninitializing the SLI interface. It also
1774  * frees the HBQ buffers returned by the firmware but not yet
1775  * processed by the upper layers.
1776  **/
1777 void
1778 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
1779 {
1780 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
1781 	struct hbq_dmabuf *hbq_buf;
1782 	unsigned long flags;
1783 	int i, hbq_count;
1784 
1785 	hbq_count = lpfc_sli_hbq_count();
1786 	/* Return all memory used by all HBQs */
1787 	spin_lock_irqsave(&phba->hbalock, flags);
1788 	for (i = 0; i < hbq_count; ++i) {
1789 		list_for_each_entry_safe(dmabuf, next_dmabuf,
1790 				&phba->hbqs[i].hbq_buffer_list, list) {
1791 			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
1792 			list_del(&hbq_buf->dbuf.list);
1793 			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
1794 		}
1795 		phba->hbqs[i].buffer_count = 0;
1796 	}
1797 
1798 	/* Mark the HBQs not in use */
1799 	phba->hbq_in_use = 0;
1800 	spin_unlock_irqrestore(&phba->hbalock, flags);
1801 }
1802 
1803 /**
1804  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
1805  * @phba: Pointer to HBA context object.
1806  * @hbqno: HBQ number.
1807  * @hbq_buf: Pointer to HBQ buffer.
1808  *
1809  * This function is called with the hbalock held to post a
1810  * hbq buffer to the firmware. If the function finds an empty
1811  * slot in the HBQ, it will post the buffer. The function will return
1812  * pointer to the hbq entry if it successfully post the buffer
1813  * else it will return NULL.
1814  **/
1815 static int
1816 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
1817 			 struct hbq_dmabuf *hbq_buf)
1818 {
1819 	lockdep_assert_held(&phba->hbalock);
1820 	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
1821 }
1822 
1823 /**
1824  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
1825  * @phba: Pointer to HBA context object.
1826  * @hbqno: HBQ number.
1827  * @hbq_buf: Pointer to HBQ buffer.
1828  *
1829  * This function is called with the hbalock held to post a hbq buffer to the
1830  * firmware. If the function finds an empty slot in the HBQ, it will post the
1831  * buffer and place it on the hbq_buffer_list. The function will return zero if
1832  * it successfully post the buffer else it will return an error.
1833  **/
1834 static int
1835 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
1836 			    struct hbq_dmabuf *hbq_buf)
1837 {
1838 	struct lpfc_hbq_entry *hbqe;
1839 	dma_addr_t physaddr = hbq_buf->dbuf.phys;
1840 
1841 	lockdep_assert_held(&phba->hbalock);
1842 	/* Get next HBQ entry slot to use */
1843 	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
1844 	if (hbqe) {
1845 		struct hbq_s *hbqp = &phba->hbqs[hbqno];
1846 
1847 		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
1848 		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
1849 		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
1850 		hbqe->bde.tus.f.bdeFlags = 0;
1851 		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
1852 		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
1853 				/* Sync SLIM */
1854 		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
1855 		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
1856 				/* flush */
1857 		readl(phba->hbq_put + hbqno);
1858 		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
1859 		return 0;
1860 	} else
1861 		return -ENOMEM;
1862 }
1863 
1864 /**
1865  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
1866  * @phba: Pointer to HBA context object.
1867  * @hbqno: HBQ number.
1868  * @hbq_buf: Pointer to HBQ buffer.
1869  *
1870  * This function is called with the hbalock held to post an RQE to the SLI4
1871  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
1872  * the hbq_buffer_list and return zero, otherwise it will return an error.
1873  **/
1874 static int
1875 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
1876 			    struct hbq_dmabuf *hbq_buf)
1877 {
1878 	int rc;
1879 	struct lpfc_rqe hrqe;
1880 	struct lpfc_rqe drqe;
1881 	struct lpfc_queue *hrq;
1882 	struct lpfc_queue *drq;
1883 
1884 	if (hbqno != LPFC_ELS_HBQ)
1885 		return 1;
1886 	hrq = phba->sli4_hba.hdr_rq;
1887 	drq = phba->sli4_hba.dat_rq;
1888 
1889 	lockdep_assert_held(&phba->hbalock);
1890 	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
1891 	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
1892 	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
1893 	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
1894 	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
1895 	if (rc < 0)
1896 		return rc;
1897 	hbq_buf->tag = (rc | (hbqno << 16));
1898 	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
1899 	return 0;
1900 }
1901 
1902 /* HBQ for ELS and CT traffic. */
1903 static struct lpfc_hbq_init lpfc_els_hbq = {
1904 	.rn = 1,
1905 	.entry_count = 256,
1906 	.mask_count = 0,
1907 	.profile = 0,
1908 	.ring_mask = (1 << LPFC_ELS_RING),
1909 	.buffer_count = 0,
1910 	.init_count = 40,
1911 	.add_count = 40,
1912 };
1913 
1914 /* Array of HBQs */
1915 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
1916 	&lpfc_els_hbq,
1917 };
1918 
1919 /**
1920  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
1921  * @phba: Pointer to HBA context object.
1922  * @hbqno: HBQ number.
1923  * @count: Number of HBQ buffers to be posted.
1924  *
1925  * This function is called with no lock held to post more hbq buffers to the
1926  * given HBQ. The function returns the number of HBQ buffers successfully
1927  * posted.
1928  **/
1929 static int
1930 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
1931 {
1932 	uint32_t i, posted = 0;
1933 	unsigned long flags;
1934 	struct hbq_dmabuf *hbq_buffer;
1935 	LIST_HEAD(hbq_buf_list);
1936 	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
1937 		return 0;
1938 
1939 	if ((phba->hbqs[hbqno].buffer_count + count) >
1940 	    lpfc_hbq_defs[hbqno]->entry_count)
1941 		count = lpfc_hbq_defs[hbqno]->entry_count -
1942 					phba->hbqs[hbqno].buffer_count;
1943 	if (!count)
1944 		return 0;
1945 	/* Allocate HBQ entries */
1946 	for (i = 0; i < count; i++) {
1947 		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
1948 		if (!hbq_buffer)
1949 			break;
1950 		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
1951 	}
1952 	/* Check whether HBQ is still in use */
1953 	spin_lock_irqsave(&phba->hbalock, flags);
1954 	if (!phba->hbq_in_use)
1955 		goto err;
1956 	while (!list_empty(&hbq_buf_list)) {
1957 		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1958 				 dbuf.list);
1959 		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
1960 				      (hbqno << 16));
1961 		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
1962 			phba->hbqs[hbqno].buffer_count++;
1963 			posted++;
1964 		} else
1965 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1966 	}
1967 	spin_unlock_irqrestore(&phba->hbalock, flags);
1968 	return posted;
1969 err:
1970 	spin_unlock_irqrestore(&phba->hbalock, flags);
1971 	while (!list_empty(&hbq_buf_list)) {
1972 		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
1973 				 dbuf.list);
1974 		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
1975 	}
1976 	return 0;
1977 }
1978 
1979 /**
1980  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
1981  * @phba: Pointer to HBA context object.
1982  * @qno: HBQ number.
1983  *
1984  * This function posts more buffers to the HBQ. This function
1985  * is called with no lock held. The function returns the number of HBQ entries
1986  * successfully allocated.
1987  **/
1988 int
1989 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
1990 {
1991 	if (phba->sli_rev == LPFC_SLI_REV4)
1992 		return 0;
1993 	else
1994 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
1995 					 lpfc_hbq_defs[qno]->add_count);
1996 }
1997 
1998 /**
1999  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2000  * @phba: Pointer to HBA context object.
2001  * @qno:  HBQ queue number.
2002  *
2003  * This function is called from SLI initialization code path with
2004  * no lock held to post initial HBQ buffers to firmware. The
2005  * function returns the number of HBQ entries successfully allocated.
2006  **/
2007 static int
2008 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2009 {
2010 	if (phba->sli_rev == LPFC_SLI_REV4)
2011 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2012 					lpfc_hbq_defs[qno]->entry_count);
2013 	else
2014 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2015 					 lpfc_hbq_defs[qno]->init_count);
2016 }
2017 
2018 /**
2019  * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2020  * @phba: Pointer to HBA context object.
2021  * @hbqno: HBQ number.
2022  *
2023  * This function removes the first hbq buffer on an hbq list and returns a
2024  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2025  **/
2026 static struct hbq_dmabuf *
2027 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2028 {
2029 	struct lpfc_dmabuf *d_buf;
2030 
2031 	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2032 	if (!d_buf)
2033 		return NULL;
2034 	return container_of(d_buf, struct hbq_dmabuf, dbuf);
2035 }
2036 
2037 /**
2038  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2039  * @phba: Pointer to HBA context object.
2040  * @hbqno: HBQ number.
2041  *
2042  * This function removes the first RQ buffer on an RQ buffer list and returns a
2043  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2044  **/
2045 static struct rqb_dmabuf *
2046 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2047 {
2048 	struct lpfc_dmabuf *h_buf;
2049 	struct lpfc_rqb *rqbp;
2050 
2051 	rqbp = hrq->rqbp;
2052 	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2053 			 struct lpfc_dmabuf, list);
2054 	if (!h_buf)
2055 		return NULL;
2056 	rqbp->buffer_count--;
2057 	return container_of(h_buf, struct rqb_dmabuf, hbuf);
2058 }
2059 
2060 /**
2061  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2062  * @phba: Pointer to HBA context object.
2063  * @tag: Tag of the hbq buffer.
2064  *
2065  * This function searches for the hbq buffer associated with the given tag in
2066  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2067  * otherwise it returns NULL.
2068  **/
2069 static struct hbq_dmabuf *
2070 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2071 {
2072 	struct lpfc_dmabuf *d_buf;
2073 	struct hbq_dmabuf *hbq_buf;
2074 	uint32_t hbqno;
2075 
2076 	hbqno = tag >> 16;
2077 	if (hbqno >= LPFC_MAX_HBQS)
2078 		return NULL;
2079 
2080 	spin_lock_irq(&phba->hbalock);
2081 	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2082 		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2083 		if (hbq_buf->tag == tag) {
2084 			spin_unlock_irq(&phba->hbalock);
2085 			return hbq_buf;
2086 		}
2087 	}
2088 	spin_unlock_irq(&phba->hbalock);
2089 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
2090 			"1803 Bad hbq tag. Data: x%x x%x\n",
2091 			tag, phba->hbqs[tag >> 16].buffer_count);
2092 	return NULL;
2093 }
2094 
2095 /**
2096  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2097  * @phba: Pointer to HBA context object.
2098  * @hbq_buffer: Pointer to HBQ buffer.
2099  *
2100  * This function is called with hbalock. This function gives back
2101  * the hbq buffer to firmware. If the HBQ does not have space to
2102  * post the buffer, it will free the buffer.
2103  **/
2104 void
2105 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2106 {
2107 	uint32_t hbqno;
2108 
2109 	if (hbq_buffer) {
2110 		hbqno = hbq_buffer->tag >> 16;
2111 		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2112 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2113 	}
2114 }
2115 
2116 /**
2117  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2118  * @mbxCommand: mailbox command code.
2119  *
2120  * This function is called by the mailbox event handler function to verify
2121  * that the completed mailbox command is a legitimate mailbox command. If the
2122  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2123  * and the mailbox event handler will take the HBA offline.
2124  **/
2125 static int
2126 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2127 {
2128 	uint8_t ret;
2129 
2130 	switch (mbxCommand) {
2131 	case MBX_LOAD_SM:
2132 	case MBX_READ_NV:
2133 	case MBX_WRITE_NV:
2134 	case MBX_WRITE_VPARMS:
2135 	case MBX_RUN_BIU_DIAG:
2136 	case MBX_INIT_LINK:
2137 	case MBX_DOWN_LINK:
2138 	case MBX_CONFIG_LINK:
2139 	case MBX_CONFIG_RING:
2140 	case MBX_RESET_RING:
2141 	case MBX_READ_CONFIG:
2142 	case MBX_READ_RCONFIG:
2143 	case MBX_READ_SPARM:
2144 	case MBX_READ_STATUS:
2145 	case MBX_READ_RPI:
2146 	case MBX_READ_XRI:
2147 	case MBX_READ_REV:
2148 	case MBX_READ_LNK_STAT:
2149 	case MBX_REG_LOGIN:
2150 	case MBX_UNREG_LOGIN:
2151 	case MBX_CLEAR_LA:
2152 	case MBX_DUMP_MEMORY:
2153 	case MBX_DUMP_CONTEXT:
2154 	case MBX_RUN_DIAGS:
2155 	case MBX_RESTART:
2156 	case MBX_UPDATE_CFG:
2157 	case MBX_DOWN_LOAD:
2158 	case MBX_DEL_LD_ENTRY:
2159 	case MBX_RUN_PROGRAM:
2160 	case MBX_SET_MASK:
2161 	case MBX_SET_VARIABLE:
2162 	case MBX_UNREG_D_ID:
2163 	case MBX_KILL_BOARD:
2164 	case MBX_CONFIG_FARP:
2165 	case MBX_BEACON:
2166 	case MBX_LOAD_AREA:
2167 	case MBX_RUN_BIU_DIAG64:
2168 	case MBX_CONFIG_PORT:
2169 	case MBX_READ_SPARM64:
2170 	case MBX_READ_RPI64:
2171 	case MBX_REG_LOGIN64:
2172 	case MBX_READ_TOPOLOGY:
2173 	case MBX_WRITE_WWN:
2174 	case MBX_SET_DEBUG:
2175 	case MBX_LOAD_EXP_ROM:
2176 	case MBX_ASYNCEVT_ENABLE:
2177 	case MBX_REG_VPI:
2178 	case MBX_UNREG_VPI:
2179 	case MBX_HEARTBEAT:
2180 	case MBX_PORT_CAPABILITIES:
2181 	case MBX_PORT_IOV_CONTROL:
2182 	case MBX_SLI4_CONFIG:
2183 	case MBX_SLI4_REQ_FTRS:
2184 	case MBX_REG_FCFI:
2185 	case MBX_UNREG_FCFI:
2186 	case MBX_REG_VFI:
2187 	case MBX_UNREG_VFI:
2188 	case MBX_INIT_VPI:
2189 	case MBX_INIT_VFI:
2190 	case MBX_RESUME_RPI:
2191 	case MBX_READ_EVENT_LOG_STATUS:
2192 	case MBX_READ_EVENT_LOG:
2193 	case MBX_SECURITY_MGMT:
2194 	case MBX_AUTH_PORT:
2195 	case MBX_ACCESS_VDATA:
2196 		ret = mbxCommand;
2197 		break;
2198 	default:
2199 		ret = MBX_SHUTDOWN;
2200 		break;
2201 	}
2202 	return ret;
2203 }
2204 
2205 /**
2206  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2207  * @phba: Pointer to HBA context object.
2208  * @pmboxq: Pointer to mailbox command.
2209  *
2210  * This is completion handler function for mailbox commands issued from
2211  * lpfc_sli_issue_mbox_wait function. This function is called by the
2212  * mailbox event handler function with no lock held. This function
2213  * will wake up thread waiting on the wait queue pointed by context1
2214  * of the mailbox.
2215  **/
2216 void
2217 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2218 {
2219 	wait_queue_head_t *pdone_q;
2220 	unsigned long drvr_flag;
2221 
2222 	/*
2223 	 * If pdone_q is empty, the driver thread gave up waiting and
2224 	 * continued running.
2225 	 */
2226 	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2227 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
2228 	pdone_q = (wait_queue_head_t *) pmboxq->context1;
2229 	if (pdone_q)
2230 		wake_up_interruptible(pdone_q);
2231 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2232 	return;
2233 }
2234 
2235 
2236 /**
2237  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2238  * @phba: Pointer to HBA context object.
2239  * @pmb: Pointer to mailbox object.
2240  *
2241  * This function is the default mailbox completion handler. It
2242  * frees the memory resources associated with the completed mailbox
2243  * command. If the completed command is a REG_LOGIN mailbox command,
2244  * this function will issue a UREG_LOGIN to re-claim the RPI.
2245  **/
2246 void
2247 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2248 {
2249 	struct lpfc_vport  *vport = pmb->vport;
2250 	struct lpfc_dmabuf *mp;
2251 	struct lpfc_nodelist *ndlp;
2252 	struct Scsi_Host *shost;
2253 	uint16_t rpi, vpi;
2254 	int rc;
2255 
2256 	mp = (struct lpfc_dmabuf *) (pmb->context1);
2257 
2258 	if (mp) {
2259 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
2260 		kfree(mp);
2261 	}
2262 
2263 	/*
2264 	 * If a REG_LOGIN succeeded  after node is destroyed or node
2265 	 * is in re-discovery driver need to cleanup the RPI.
2266 	 */
2267 	if (!(phba->pport->load_flag & FC_UNLOADING) &&
2268 	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2269 	    !pmb->u.mb.mbxStatus) {
2270 		rpi = pmb->u.mb.un.varWords[0];
2271 		vpi = pmb->u.mb.un.varRegLogin.vpi;
2272 		lpfc_unreg_login(phba, vpi, rpi, pmb);
2273 		pmb->vport = vport;
2274 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2275 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2276 		if (rc != MBX_NOT_FINISHED)
2277 			return;
2278 	}
2279 
2280 	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2281 		!(phba->pport->load_flag & FC_UNLOADING) &&
2282 		!pmb->u.mb.mbxStatus) {
2283 		shost = lpfc_shost_from_vport(vport);
2284 		spin_lock_irq(shost->host_lock);
2285 		vport->vpi_state |= LPFC_VPI_REGISTERED;
2286 		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2287 		spin_unlock_irq(shost->host_lock);
2288 	}
2289 
2290 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2291 		ndlp = (struct lpfc_nodelist *)pmb->context2;
2292 		lpfc_nlp_put(ndlp);
2293 		pmb->context2 = NULL;
2294 	}
2295 
2296 	/* Check security permission status on INIT_LINK mailbox command */
2297 	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2298 	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2299 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2300 				"2860 SLI authentication is required "
2301 				"for INIT_LINK but has not done yet\n");
2302 
2303 	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2304 		lpfc_sli4_mbox_cmd_free(phba, pmb);
2305 	else
2306 		mempool_free(pmb, phba->mbox_mem_pool);
2307 }
2308  /**
2309  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2310  * @phba: Pointer to HBA context object.
2311  * @pmb: Pointer to mailbox object.
2312  *
2313  * This function is the unreg rpi mailbox completion handler. It
2314  * frees the memory resources associated with the completed mailbox
2315  * command. An additional refrenece is put on the ndlp to prevent
2316  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2317  * the unreg mailbox command completes, this routine puts the
2318  * reference back.
2319  *
2320  **/
2321 void
2322 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2323 {
2324 	struct lpfc_vport  *vport = pmb->vport;
2325 	struct lpfc_nodelist *ndlp;
2326 
2327 	ndlp = pmb->context1;
2328 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2329 		if (phba->sli_rev == LPFC_SLI_REV4 &&
2330 		    (bf_get(lpfc_sli_intf_if_type,
2331 		     &phba->sli4_hba.sli_intf) ==
2332 		     LPFC_SLI_INTF_IF_TYPE_2)) {
2333 			if (ndlp) {
2334 				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
2335 						 "0010 UNREG_LOGIN vpi:%x "
2336 						 "rpi:%x DID:%x map:%x %p\n",
2337 						 vport->vpi, ndlp->nlp_rpi,
2338 						 ndlp->nlp_DID,
2339 						 ndlp->nlp_usg_map, ndlp);
2340 				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
2341 				lpfc_nlp_put(ndlp);
2342 			}
2343 		}
2344 	}
2345 
2346 	mempool_free(pmb, phba->mbox_mem_pool);
2347 }
2348 
2349 /**
2350  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2351  * @phba: Pointer to HBA context object.
2352  *
2353  * This function is called with no lock held. This function processes all
2354  * the completed mailbox commands and gives it to upper layers. The interrupt
2355  * service routine processes mailbox completion interrupt and adds completed
2356  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2357  * Worker thread call lpfc_sli_handle_mb_event, which will return the
2358  * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2359  * function returns the mailbox commands to the upper layer by calling the
2360  * completion handler function of each mailbox.
2361  **/
2362 int
2363 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
2364 {
2365 	MAILBOX_t *pmbox;
2366 	LPFC_MBOXQ_t *pmb;
2367 	int rc;
2368 	LIST_HEAD(cmplq);
2369 
2370 	phba->sli.slistat.mbox_event++;
2371 
2372 	/* Get all completed mailboxe buffers into the cmplq */
2373 	spin_lock_irq(&phba->hbalock);
2374 	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
2375 	spin_unlock_irq(&phba->hbalock);
2376 
2377 	/* Get a Mailbox buffer to setup mailbox commands for callback */
2378 	do {
2379 		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
2380 		if (pmb == NULL)
2381 			break;
2382 
2383 		pmbox = &pmb->u.mb;
2384 
2385 		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
2386 			if (pmb->vport) {
2387 				lpfc_debugfs_disc_trc(pmb->vport,
2388 					LPFC_DISC_TRC_MBOX_VPORT,
2389 					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2390 					(uint32_t)pmbox->mbxCommand,
2391 					pmbox->un.varWords[0],
2392 					pmbox->un.varWords[1]);
2393 			}
2394 			else {
2395 				lpfc_debugfs_disc_trc(phba->pport,
2396 					LPFC_DISC_TRC_MBOX,
2397 					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
2398 					(uint32_t)pmbox->mbxCommand,
2399 					pmbox->un.varWords[0],
2400 					pmbox->un.varWords[1]);
2401 			}
2402 		}
2403 
2404 		/*
2405 		 * It is a fatal error if unknown mbox command completion.
2406 		 */
2407 		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
2408 		    MBX_SHUTDOWN) {
2409 			/* Unknown mailbox command compl */
2410 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2411 					"(%d):0323 Unknown Mailbox command "
2412 					"x%x (x%x/x%x) Cmpl\n",
2413 					pmb->vport ? pmb->vport->vpi : 0,
2414 					pmbox->mbxCommand,
2415 					lpfc_sli_config_mbox_subsys_get(phba,
2416 									pmb),
2417 					lpfc_sli_config_mbox_opcode_get(phba,
2418 									pmb));
2419 			phba->link_state = LPFC_HBA_ERROR;
2420 			phba->work_hs = HS_FFER3;
2421 			lpfc_handle_eratt(phba);
2422 			continue;
2423 		}
2424 
2425 		if (pmbox->mbxStatus) {
2426 			phba->sli.slistat.mbox_stat_err++;
2427 			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
2428 				/* Mbox cmd cmpl error - RETRYing */
2429 				lpfc_printf_log(phba, KERN_INFO,
2430 					LOG_MBOX | LOG_SLI,
2431 					"(%d):0305 Mbox cmd cmpl "
2432 					"error - RETRYing Data: x%x "
2433 					"(x%x/x%x) x%x x%x x%x\n",
2434 					pmb->vport ? pmb->vport->vpi : 0,
2435 					pmbox->mbxCommand,
2436 					lpfc_sli_config_mbox_subsys_get(phba,
2437 									pmb),
2438 					lpfc_sli_config_mbox_opcode_get(phba,
2439 									pmb),
2440 					pmbox->mbxStatus,
2441 					pmbox->un.varWords[0],
2442 					pmb->vport->port_state);
2443 				pmbox->mbxStatus = 0;
2444 				pmbox->mbxOwner = OWN_HOST;
2445 				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2446 				if (rc != MBX_NOT_FINISHED)
2447 					continue;
2448 			}
2449 		}
2450 
2451 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
2452 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2453 				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
2454 				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2455 				"x%x x%x x%x\n",
2456 				pmb->vport ? pmb->vport->vpi : 0,
2457 				pmbox->mbxCommand,
2458 				lpfc_sli_config_mbox_subsys_get(phba, pmb),
2459 				lpfc_sli_config_mbox_opcode_get(phba, pmb),
2460 				pmb->mbox_cmpl,
2461 				*((uint32_t *) pmbox),
2462 				pmbox->un.varWords[0],
2463 				pmbox->un.varWords[1],
2464 				pmbox->un.varWords[2],
2465 				pmbox->un.varWords[3],
2466 				pmbox->un.varWords[4],
2467 				pmbox->un.varWords[5],
2468 				pmbox->un.varWords[6],
2469 				pmbox->un.varWords[7],
2470 				pmbox->un.varWords[8],
2471 				pmbox->un.varWords[9],
2472 				pmbox->un.varWords[10]);
2473 
2474 		if (pmb->mbox_cmpl)
2475 			pmb->mbox_cmpl(phba,pmb);
2476 	} while (1);
2477 	return 0;
2478 }
2479 
2480 /**
2481  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
2482  * @phba: Pointer to HBA context object.
2483  * @pring: Pointer to driver SLI ring object.
2484  * @tag: buffer tag.
2485  *
2486  * This function is called with no lock held. When QUE_BUFTAG_BIT bit
2487  * is set in the tag the buffer is posted for a particular exchange,
2488  * the function will return the buffer without replacing the buffer.
2489  * If the buffer is for unsolicited ELS or CT traffic, this function
2490  * returns the buffer and also posts another buffer to the firmware.
2491  **/
2492 static struct lpfc_dmabuf *
2493 lpfc_sli_get_buff(struct lpfc_hba *phba,
2494 		  struct lpfc_sli_ring *pring,
2495 		  uint32_t tag)
2496 {
2497 	struct hbq_dmabuf *hbq_entry;
2498 
2499 	if (tag & QUE_BUFTAG_BIT)
2500 		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
2501 	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
2502 	if (!hbq_entry)
2503 		return NULL;
2504 	return &hbq_entry->dbuf;
2505 }
2506 
2507 /**
2508  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
2509  * @phba: Pointer to HBA context object.
2510  * @pring: Pointer to driver SLI ring object.
2511  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
2512  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
2513  * @fch_type: the type for the first frame of the sequence.
2514  *
2515  * This function is called with no lock held. This function uses the r_ctl and
2516  * type of the received sequence to find the correct callback function to call
2517  * to process the sequence.
2518  **/
2519 static int
2520 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2521 			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
2522 			 uint32_t fch_type)
2523 {
2524 	int i;
2525 
2526 	switch (fch_type) {
2527 	case FC_TYPE_NVME:
2528 		lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
2529 		return 1;
2530 	default:
2531 		break;
2532 	}
2533 
2534 	/* unSolicited Responses */
2535 	if (pring->prt[0].profile) {
2536 		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
2537 			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
2538 									saveq);
2539 		return 1;
2540 	}
2541 	/* We must search, based on rctl / type
2542 	   for the right routine */
2543 	for (i = 0; i < pring->num_mask; i++) {
2544 		if ((pring->prt[i].rctl == fch_r_ctl) &&
2545 		    (pring->prt[i].type == fch_type)) {
2546 			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
2547 				(pring->prt[i].lpfc_sli_rcv_unsol_event)
2548 						(phba, pring, saveq);
2549 			return 1;
2550 		}
2551 	}
2552 	return 0;
2553 }
2554 
2555 /**
2556  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
2557  * @phba: Pointer to HBA context object.
2558  * @pring: Pointer to driver SLI ring object.
2559  * @saveq: Pointer to the unsolicited iocb.
2560  *
2561  * This function is called with no lock held by the ring event handler
2562  * when there is an unsolicited iocb posted to the response ring by the
2563  * firmware. This function gets the buffer associated with the iocbs
2564  * and calls the event handler for the ring. This function handles both
2565  * qring buffers and hbq buffers.
2566  * When the function returns 1 the caller can free the iocb object otherwise
2567  * upper layer functions will free the iocb objects.
2568  **/
2569 static int
2570 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2571 			    struct lpfc_iocbq *saveq)
2572 {
2573 	IOCB_t           * irsp;
2574 	WORD5            * w5p;
2575 	uint32_t           Rctl, Type;
2576 	struct lpfc_iocbq *iocbq;
2577 	struct lpfc_dmabuf *dmzbuf;
2578 
2579 	irsp = &(saveq->iocb);
2580 
2581 	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
2582 		if (pring->lpfc_sli_rcv_async_status)
2583 			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
2584 		else
2585 			lpfc_printf_log(phba,
2586 					KERN_WARNING,
2587 					LOG_SLI,
2588 					"0316 Ring %d handler: unexpected "
2589 					"ASYNC_STATUS iocb received evt_code "
2590 					"0x%x\n",
2591 					pring->ringno,
2592 					irsp->un.asyncstat.evt_code);
2593 		return 1;
2594 	}
2595 
2596 	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
2597 		(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
2598 		if (irsp->ulpBdeCount > 0) {
2599 			dmzbuf = lpfc_sli_get_buff(phba, pring,
2600 					irsp->un.ulpWord[3]);
2601 			lpfc_in_buf_free(phba, dmzbuf);
2602 		}
2603 
2604 		if (irsp->ulpBdeCount > 1) {
2605 			dmzbuf = lpfc_sli_get_buff(phba, pring,
2606 					irsp->unsli3.sli3Words[3]);
2607 			lpfc_in_buf_free(phba, dmzbuf);
2608 		}
2609 
2610 		if (irsp->ulpBdeCount > 2) {
2611 			dmzbuf = lpfc_sli_get_buff(phba, pring,
2612 				irsp->unsli3.sli3Words[7]);
2613 			lpfc_in_buf_free(phba, dmzbuf);
2614 		}
2615 
2616 		return 1;
2617 	}
2618 
2619 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2620 		if (irsp->ulpBdeCount != 0) {
2621 			saveq->context2 = lpfc_sli_get_buff(phba, pring,
2622 						irsp->un.ulpWord[3]);
2623 			if (!saveq->context2)
2624 				lpfc_printf_log(phba,
2625 					KERN_ERR,
2626 					LOG_SLI,
2627 					"0341 Ring %d Cannot find buffer for "
2628 					"an unsolicited iocb. tag 0x%x\n",
2629 					pring->ringno,
2630 					irsp->un.ulpWord[3]);
2631 		}
2632 		if (irsp->ulpBdeCount == 2) {
2633 			saveq->context3 = lpfc_sli_get_buff(phba, pring,
2634 						irsp->unsli3.sli3Words[7]);
2635 			if (!saveq->context3)
2636 				lpfc_printf_log(phba,
2637 					KERN_ERR,
2638 					LOG_SLI,
2639 					"0342 Ring %d Cannot find buffer for an"
2640 					" unsolicited iocb. tag 0x%x\n",
2641 					pring->ringno,
2642 					irsp->unsli3.sli3Words[7]);
2643 		}
2644 		list_for_each_entry(iocbq, &saveq->list, list) {
2645 			irsp = &(iocbq->iocb);
2646 			if (irsp->ulpBdeCount != 0) {
2647 				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
2648 							irsp->un.ulpWord[3]);
2649 				if (!iocbq->context2)
2650 					lpfc_printf_log(phba,
2651 						KERN_ERR,
2652 						LOG_SLI,
2653 						"0343 Ring %d Cannot find "
2654 						"buffer for an unsolicited iocb"
2655 						". tag 0x%x\n", pring->ringno,
2656 						irsp->un.ulpWord[3]);
2657 			}
2658 			if (irsp->ulpBdeCount == 2) {
2659 				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
2660 						irsp->unsli3.sli3Words[7]);
2661 				if (!iocbq->context3)
2662 					lpfc_printf_log(phba,
2663 						KERN_ERR,
2664 						LOG_SLI,
2665 						"0344 Ring %d Cannot find "
2666 						"buffer for an unsolicited "
2667 						"iocb. tag 0x%x\n",
2668 						pring->ringno,
2669 						irsp->unsli3.sli3Words[7]);
2670 			}
2671 		}
2672 	}
2673 	if (irsp->ulpBdeCount != 0 &&
2674 	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
2675 	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
2676 		int found = 0;
2677 
2678 		/* search continue save q for same XRI */
2679 		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
2680 			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
2681 				saveq->iocb.unsli3.rcvsli3.ox_id) {
2682 				list_add_tail(&saveq->list, &iocbq->list);
2683 				found = 1;
2684 				break;
2685 			}
2686 		}
2687 		if (!found)
2688 			list_add_tail(&saveq->clist,
2689 				      &pring->iocb_continue_saveq);
2690 		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
2691 			list_del_init(&iocbq->clist);
2692 			saveq = iocbq;
2693 			irsp = &(saveq->iocb);
2694 		} else
2695 			return 0;
2696 	}
2697 	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
2698 	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
2699 	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
2700 		Rctl = FC_RCTL_ELS_REQ;
2701 		Type = FC_TYPE_ELS;
2702 	} else {
2703 		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
2704 		Rctl = w5p->hcsw.Rctl;
2705 		Type = w5p->hcsw.Type;
2706 
2707 		/* Firmware Workaround */
2708 		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
2709 			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
2710 			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
2711 			Rctl = FC_RCTL_ELS_REQ;
2712 			Type = FC_TYPE_ELS;
2713 			w5p->hcsw.Rctl = Rctl;
2714 			w5p->hcsw.Type = Type;
2715 		}
2716 	}
2717 
2718 	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
2719 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2720 				"0313 Ring %d handler: unexpected Rctl x%x "
2721 				"Type x%x received\n",
2722 				pring->ringno, Rctl, Type);
2723 
2724 	return 1;
2725 }
2726 
2727 /**
2728  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
2729  * @phba: Pointer to HBA context object.
2730  * @pring: Pointer to driver SLI ring object.
2731  * @prspiocb: Pointer to response iocb object.
2732  *
2733  * This function looks up the iocb_lookup table to get the command iocb
2734  * corresponding to the given response iocb using the iotag of the
2735  * response iocb. This function is called with the hbalock held.
2736  * This function returns the command iocb object if it finds the command
2737  * iocb else returns NULL.
2738  **/
2739 static struct lpfc_iocbq *
2740 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
2741 		      struct lpfc_sli_ring *pring,
2742 		      struct lpfc_iocbq *prspiocb)
2743 {
2744 	struct lpfc_iocbq *cmd_iocb = NULL;
2745 	uint16_t iotag;
2746 	lockdep_assert_held(&phba->hbalock);
2747 
2748 	iotag = prspiocb->iocb.ulpIoTag;
2749 
2750 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2751 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
2752 		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2753 			/* remove from txcmpl queue list */
2754 			list_del_init(&cmd_iocb->list);
2755 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2756 			return cmd_iocb;
2757 		}
2758 	}
2759 
2760 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2761 			"0317 iotag x%x is out of "
2762 			"range: max iotag x%x wd0 x%x\n",
2763 			iotag, phba->sli.last_iotag,
2764 			*(((uint32_t *) &prspiocb->iocb) + 7));
2765 	return NULL;
2766 }
2767 
2768 /**
2769  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
2770  * @phba: Pointer to HBA context object.
2771  * @pring: Pointer to driver SLI ring object.
2772  * @iotag: IOCB tag.
2773  *
2774  * This function looks up the iocb_lookup table to get the command iocb
2775  * corresponding to the given iotag. This function is called with the
2776  * hbalock held.
2777  * This function returns the command iocb object if it finds the command
2778  * iocb else returns NULL.
2779  **/
2780 static struct lpfc_iocbq *
2781 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
2782 			     struct lpfc_sli_ring *pring, uint16_t iotag)
2783 {
2784 	struct lpfc_iocbq *cmd_iocb = NULL;
2785 
2786 	lockdep_assert_held(&phba->hbalock);
2787 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
2788 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
2789 		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
2790 			/* remove from txcmpl queue list */
2791 			list_del_init(&cmd_iocb->list);
2792 			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
2793 			return cmd_iocb;
2794 		}
2795 	}
2796 
2797 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2798 			"0372 iotag x%x lookup error: max iotag (x%x) "
2799 			"iocb_flag x%x\n",
2800 			iotag, phba->sli.last_iotag,
2801 			cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
2802 	return NULL;
2803 }
2804 
2805 /**
2806  * lpfc_sli_process_sol_iocb - process solicited iocb completion
2807  * @phba: Pointer to HBA context object.
2808  * @pring: Pointer to driver SLI ring object.
2809  * @saveq: Pointer to the response iocb to be processed.
2810  *
2811  * This function is called by the ring event handler for non-fcp
2812  * rings when there is a new response iocb in the response ring.
2813  * The caller is not required to hold any locks. This function
2814  * gets the command iocb associated with the response iocb and
2815  * calls the completion handler for the command iocb. If there
2816  * is no completion handler, the function will free the resources
2817  * associated with command iocb. If the response iocb is for
2818  * an already aborted command iocb, the status of the completion
2819  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
2820  * This function always returns 1.
2821  **/
2822 static int
2823 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2824 			  struct lpfc_iocbq *saveq)
2825 {
2826 	struct lpfc_iocbq *cmdiocbp;
2827 	int rc = 1;
2828 	unsigned long iflag;
2829 
2830 	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
2831 	spin_lock_irqsave(&phba->hbalock, iflag);
2832 	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
2833 	spin_unlock_irqrestore(&phba->hbalock, iflag);
2834 
2835 	if (cmdiocbp) {
2836 		if (cmdiocbp->iocb_cmpl) {
2837 			/*
2838 			 * If an ELS command failed send an event to mgmt
2839 			 * application.
2840 			 */
2841 			if (saveq->iocb.ulpStatus &&
2842 			     (pring->ringno == LPFC_ELS_RING) &&
2843 			     (cmdiocbp->iocb.ulpCommand ==
2844 				CMD_ELS_REQUEST64_CR))
2845 				lpfc_send_els_failure_event(phba,
2846 					cmdiocbp, saveq);
2847 
2848 			/*
2849 			 * Post all ELS completions to the worker thread.
2850 			 * All other are passed to the completion callback.
2851 			 */
2852 			if (pring->ringno == LPFC_ELS_RING) {
2853 				if ((phba->sli_rev < LPFC_SLI_REV4) &&
2854 				    (cmdiocbp->iocb_flag &
2855 							LPFC_DRIVER_ABORTED)) {
2856 					spin_lock_irqsave(&phba->hbalock,
2857 							  iflag);
2858 					cmdiocbp->iocb_flag &=
2859 						~LPFC_DRIVER_ABORTED;
2860 					spin_unlock_irqrestore(&phba->hbalock,
2861 							       iflag);
2862 					saveq->iocb.ulpStatus =
2863 						IOSTAT_LOCAL_REJECT;
2864 					saveq->iocb.un.ulpWord[4] =
2865 						IOERR_SLI_ABORTED;
2866 
2867 					/* Firmware could still be in progress
2868 					 * of DMAing payload, so don't free data
2869 					 * buffer till after a hbeat.
2870 					 */
2871 					spin_lock_irqsave(&phba->hbalock,
2872 							  iflag);
2873 					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
2874 					spin_unlock_irqrestore(&phba->hbalock,
2875 							       iflag);
2876 				}
2877 				if (phba->sli_rev == LPFC_SLI_REV4) {
2878 					if (saveq->iocb_flag &
2879 					    LPFC_EXCHANGE_BUSY) {
2880 						/* Set cmdiocb flag for the
2881 						 * exchange busy so sgl (xri)
2882 						 * will not be released until
2883 						 * the abort xri is received
2884 						 * from hba.
2885 						 */
2886 						spin_lock_irqsave(
2887 							&phba->hbalock, iflag);
2888 						cmdiocbp->iocb_flag |=
2889 							LPFC_EXCHANGE_BUSY;
2890 						spin_unlock_irqrestore(
2891 							&phba->hbalock, iflag);
2892 					}
2893 					if (cmdiocbp->iocb_flag &
2894 					    LPFC_DRIVER_ABORTED) {
2895 						/*
2896 						 * Clear LPFC_DRIVER_ABORTED
2897 						 * bit in case it was driver
2898 						 * initiated abort.
2899 						 */
2900 						spin_lock_irqsave(
2901 							&phba->hbalock, iflag);
2902 						cmdiocbp->iocb_flag &=
2903 							~LPFC_DRIVER_ABORTED;
2904 						spin_unlock_irqrestore(
2905 							&phba->hbalock, iflag);
2906 						cmdiocbp->iocb.ulpStatus =
2907 							IOSTAT_LOCAL_REJECT;
2908 						cmdiocbp->iocb.un.ulpWord[4] =
2909 							IOERR_ABORT_REQUESTED;
2910 						/*
2911 						 * For SLI4, irsiocb contains
2912 						 * NO_XRI in sli_xritag, it
2913 						 * shall not affect releasing
2914 						 * sgl (xri) process.
2915 						 */
2916 						saveq->iocb.ulpStatus =
2917 							IOSTAT_LOCAL_REJECT;
2918 						saveq->iocb.un.ulpWord[4] =
2919 							IOERR_SLI_ABORTED;
2920 						spin_lock_irqsave(
2921 							&phba->hbalock, iflag);
2922 						saveq->iocb_flag |=
2923 							LPFC_DELAY_MEM_FREE;
2924 						spin_unlock_irqrestore(
2925 							&phba->hbalock, iflag);
2926 					}
2927 				}
2928 			}
2929 			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
2930 		} else
2931 			lpfc_sli_release_iocbq(phba, cmdiocbp);
2932 	} else {
2933 		/*
2934 		 * Unknown initiating command based on the response iotag.
2935 		 * This could be the case on the ELS ring because of
2936 		 * lpfc_els_abort().
2937 		 */
2938 		if (pring->ringno != LPFC_ELS_RING) {
2939 			/*
2940 			 * Ring <ringno> handler: unexpected completion IoTag
2941 			 * <IoTag>
2942 			 */
2943 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2944 					 "0322 Ring %d handler: "
2945 					 "unexpected completion IoTag x%x "
2946 					 "Data: x%x x%x x%x x%x\n",
2947 					 pring->ringno,
2948 					 saveq->iocb.ulpIoTag,
2949 					 saveq->iocb.ulpStatus,
2950 					 saveq->iocb.un.ulpWord[4],
2951 					 saveq->iocb.ulpCommand,
2952 					 saveq->iocb.ulpContext);
2953 		}
2954 	}
2955 
2956 	return rc;
2957 }
2958 
2959 /**
2960  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
2961  * @phba: Pointer to HBA context object.
2962  * @pring: Pointer to driver SLI ring object.
2963  *
2964  * This function is called from the iocb ring event handlers when
2965  * put pointer is ahead of the get pointer for a ring. This function signal
2966  * an error attention condition to the worker thread and the worker
2967  * thread will transition the HBA to offline state.
2968  **/
2969 static void
2970 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2971 {
2972 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2973 	/*
2974 	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
2975 	 * rsp ring <portRspMax>
2976 	 */
2977 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2978 			"0312 Ring %d handler: portRspPut %d "
2979 			"is bigger than rsp ring %d\n",
2980 			pring->ringno, le32_to_cpu(pgp->rspPutInx),
2981 			pring->sli.sli3.numRiocb);
2982 
2983 	phba->link_state = LPFC_HBA_ERROR;
2984 
2985 	/*
2986 	 * All error attention handlers are posted to
2987 	 * worker thread
2988 	 */
2989 	phba->work_ha |= HA_ERATT;
2990 	phba->work_hs = HS_FFER3;
2991 
2992 	lpfc_worker_wake_up(phba);
2993 
2994 	return;
2995 }
2996 
2997 /**
2998  * lpfc_poll_eratt - Error attention polling timer timeout handler
2999  * @ptr: Pointer to address of HBA context object.
3000  *
3001  * This function is invoked by the Error Attention polling timer when the
3002  * timer times out. It will check the SLI Error Attention register for
3003  * possible attention events. If so, it will post an Error Attention event
3004  * and wake up worker thread to process it. Otherwise, it will set up the
3005  * Error Attention polling timer for the next poll.
3006  **/
3007 void lpfc_poll_eratt(unsigned long ptr)
3008 {
3009 	struct lpfc_hba *phba;
3010 	uint32_t eratt = 0;
3011 	uint64_t sli_intr, cnt;
3012 
3013 	phba = (struct lpfc_hba *)ptr;
3014 
3015 	/* Here we will also keep track of interrupts per sec of the hba */
3016 	sli_intr = phba->sli.slistat.sli_intr;
3017 
3018 	if (phba->sli.slistat.sli_prev_intr > sli_intr)
3019 		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3020 			sli_intr);
3021 	else
3022 		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3023 
3024 	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
3025 	do_div(cnt, phba->eratt_poll_interval);
3026 	phba->sli.slistat.sli_ips = cnt;
3027 
3028 	phba->sli.slistat.sli_prev_intr = sli_intr;
3029 
3030 	/* Check chip HA register for error event */
3031 	eratt = lpfc_sli_check_eratt(phba);
3032 
3033 	if (eratt)
3034 		/* Tell the worker thread there is work to do */
3035 		lpfc_worker_wake_up(phba);
3036 	else
3037 		/* Restart the timer for next eratt poll */
3038 		mod_timer(&phba->eratt_poll,
3039 			  jiffies +
3040 			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3041 	return;
3042 }
3043 
3044 
3045 /**
3046  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3047  * @phba: Pointer to HBA context object.
3048  * @pring: Pointer to driver SLI ring object.
3049  * @mask: Host attention register mask for this ring.
3050  *
3051  * This function is called from the interrupt context when there is a ring
3052  * event for the fcp ring. The caller does not hold any lock.
3053  * The function processes each response iocb in the response ring until it
3054  * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3055  * LE bit set. The function will call the completion handler of the command iocb
3056  * if the response iocb indicates a completion for a command iocb or it is
3057  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3058  * function if this is an unsolicited iocb.
3059  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3060  * to check it explicitly.
3061  */
3062 int
3063 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3064 				struct lpfc_sli_ring *pring, uint32_t mask)
3065 {
3066 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3067 	IOCB_t *irsp = NULL;
3068 	IOCB_t *entry = NULL;
3069 	struct lpfc_iocbq *cmdiocbq = NULL;
3070 	struct lpfc_iocbq rspiocbq;
3071 	uint32_t status;
3072 	uint32_t portRspPut, portRspMax;
3073 	int rc = 1;
3074 	lpfc_iocb_type type;
3075 	unsigned long iflag;
3076 	uint32_t rsp_cmpl = 0;
3077 
3078 	spin_lock_irqsave(&phba->hbalock, iflag);
3079 	pring->stats.iocb_event++;
3080 
3081 	/*
3082 	 * The next available response entry should never exceed the maximum
3083 	 * entries.  If it does, treat it as an adapter hardware error.
3084 	 */
3085 	portRspMax = pring->sli.sli3.numRiocb;
3086 	portRspPut = le32_to_cpu(pgp->rspPutInx);
3087 	if (unlikely(portRspPut >= portRspMax)) {
3088 		lpfc_sli_rsp_pointers_error(phba, pring);
3089 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3090 		return 1;
3091 	}
3092 	if (phba->fcp_ring_in_use) {
3093 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3094 		return 1;
3095 	} else
3096 		phba->fcp_ring_in_use = 1;
3097 
3098 	rmb();
3099 	while (pring->sli.sli3.rspidx != portRspPut) {
3100 		/*
3101 		 * Fetch an entry off the ring and copy it into a local data
3102 		 * structure.  The copy involves a byte-swap since the
3103 		 * network byte order and pci byte orders are different.
3104 		 */
3105 		entry = lpfc_resp_iocb(phba, pring);
3106 		phba->last_completion_time = jiffies;
3107 
3108 		if (++pring->sli.sli3.rspidx >= portRspMax)
3109 			pring->sli.sli3.rspidx = 0;
3110 
3111 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
3112 				      (uint32_t *) &rspiocbq.iocb,
3113 				      phba->iocb_rsp_size);
3114 		INIT_LIST_HEAD(&(rspiocbq.list));
3115 		irsp = &rspiocbq.iocb;
3116 
3117 		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
3118 		pring->stats.iocb_rsp++;
3119 		rsp_cmpl++;
3120 
3121 		if (unlikely(irsp->ulpStatus)) {
3122 			/*
3123 			 * If resource errors reported from HBA, reduce
3124 			 * queuedepths of the SCSI device.
3125 			 */
3126 			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3127 			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3128 			     IOERR_NO_RESOURCES)) {
3129 				spin_unlock_irqrestore(&phba->hbalock, iflag);
3130 				phba->lpfc_rampdown_queue_depth(phba);
3131 				spin_lock_irqsave(&phba->hbalock, iflag);
3132 			}
3133 
3134 			/* Rsp ring <ringno> error: IOCB */
3135 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3136 					"0336 Rsp Ring %d error: IOCB Data: "
3137 					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
3138 					pring->ringno,
3139 					irsp->un.ulpWord[0],
3140 					irsp->un.ulpWord[1],
3141 					irsp->un.ulpWord[2],
3142 					irsp->un.ulpWord[3],
3143 					irsp->un.ulpWord[4],
3144 					irsp->un.ulpWord[5],
3145 					*(uint32_t *)&irsp->un1,
3146 					*((uint32_t *)&irsp->un1 + 1));
3147 		}
3148 
3149 		switch (type) {
3150 		case LPFC_ABORT_IOCB:
3151 		case LPFC_SOL_IOCB:
3152 			/*
3153 			 * Idle exchange closed via ABTS from port.  No iocb
3154 			 * resources need to be recovered.
3155 			 */
3156 			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
3157 				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3158 						"0333 IOCB cmd 0x%x"
3159 						" processed. Skipping"
3160 						" completion\n",
3161 						irsp->ulpCommand);
3162 				break;
3163 			}
3164 
3165 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
3166 							 &rspiocbq);
3167 			if (unlikely(!cmdiocbq))
3168 				break;
3169 			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
3170 				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3171 			if (cmdiocbq->iocb_cmpl) {
3172 				spin_unlock_irqrestore(&phba->hbalock, iflag);
3173 				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
3174 						      &rspiocbq);
3175 				spin_lock_irqsave(&phba->hbalock, iflag);
3176 			}
3177 			break;
3178 		case LPFC_UNSOL_IOCB:
3179 			spin_unlock_irqrestore(&phba->hbalock, iflag);
3180 			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
3181 			spin_lock_irqsave(&phba->hbalock, iflag);
3182 			break;
3183 		default:
3184 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3185 				char adaptermsg[LPFC_MAX_ADPTMSG];
3186 				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3187 				memcpy(&adaptermsg[0], (uint8_t *) irsp,
3188 				       MAX_MSG_DATA);
3189 				dev_warn(&((phba->pcidev)->dev),
3190 					 "lpfc%d: %s\n",
3191 					 phba->brd_no, adaptermsg);
3192 			} else {
3193 				/* Unknown IOCB command */
3194 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3195 						"0334 Unknown IOCB command "
3196 						"Data: x%x, x%x x%x x%x x%x\n",
3197 						type, irsp->ulpCommand,
3198 						irsp->ulpStatus,
3199 						irsp->ulpIoTag,
3200 						irsp->ulpContext);
3201 			}
3202 			break;
3203 		}
3204 
3205 		/*
3206 		 * The response IOCB has been processed.  Update the ring
3207 		 * pointer in SLIM.  If the port response put pointer has not
3208 		 * been updated, sync the pgp->rspPutInx and fetch the new port
3209 		 * response put pointer.
3210 		 */
3211 		writel(pring->sli.sli3.rspidx,
3212 			&phba->host_gp[pring->ringno].rspGetInx);
3213 
3214 		if (pring->sli.sli3.rspidx == portRspPut)
3215 			portRspPut = le32_to_cpu(pgp->rspPutInx);
3216 	}
3217 
3218 	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
3219 		pring->stats.iocb_rsp_full++;
3220 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3221 		writel(status, phba->CAregaddr);
3222 		readl(phba->CAregaddr);
3223 	}
3224 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3225 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3226 		pring->stats.iocb_cmd_empty++;
3227 
3228 		/* Force update of the local copy of cmdGetInx */
3229 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3230 		lpfc_sli_resume_iocb(phba, pring);
3231 
3232 		if ((pring->lpfc_sli_cmd_available))
3233 			(pring->lpfc_sli_cmd_available) (phba, pring);
3234 
3235 	}
3236 
3237 	phba->fcp_ring_in_use = 0;
3238 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3239 	return rc;
3240 }
3241 
3242 /**
3243  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
3244  * @phba: Pointer to HBA context object.
3245  * @pring: Pointer to driver SLI ring object.
3246  * @rspiocbp: Pointer to driver response IOCB object.
3247  *
3248  * This function is called from the worker thread when there is a slow-path
3249  * response IOCB to process. This function chains all the response iocbs until
3250  * seeing the iocb with the LE bit set. The function will call
3251  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
3252  * completion of a command iocb. The function will call the
3253  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
3254  * The function frees the resources or calls the completion handler if this
3255  * iocb is an abort completion. The function returns NULL when the response
3256  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
3257  * this function shall chain the iocb on to the iocb_continueq and return the
3258  * response iocb passed in.
3259  **/
3260 static struct lpfc_iocbq *
3261 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3262 			struct lpfc_iocbq *rspiocbp)
3263 {
3264 	struct lpfc_iocbq *saveq;
3265 	struct lpfc_iocbq *cmdiocbp;
3266 	struct lpfc_iocbq *next_iocb;
3267 	IOCB_t *irsp = NULL;
3268 	uint32_t free_saveq;
3269 	uint8_t iocb_cmd_type;
3270 	lpfc_iocb_type type;
3271 	unsigned long iflag;
3272 	int rc;
3273 
3274 	spin_lock_irqsave(&phba->hbalock, iflag);
3275 	/* First add the response iocb to the countinueq list */
3276 	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
3277 	pring->iocb_continueq_cnt++;
3278 
3279 	/* Now, determine whether the list is completed for processing */
3280 	irsp = &rspiocbp->iocb;
3281 	if (irsp->ulpLe) {
3282 		/*
3283 		 * By default, the driver expects to free all resources
3284 		 * associated with this iocb completion.
3285 		 */
3286 		free_saveq = 1;
3287 		saveq = list_get_first(&pring->iocb_continueq,
3288 				       struct lpfc_iocbq, list);
3289 		irsp = &(saveq->iocb);
3290 		list_del_init(&pring->iocb_continueq);
3291 		pring->iocb_continueq_cnt = 0;
3292 
3293 		pring->stats.iocb_rsp++;
3294 
3295 		/*
3296 		 * If resource errors reported from HBA, reduce
3297 		 * queuedepths of the SCSI device.
3298 		 */
3299 		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
3300 		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
3301 		     IOERR_NO_RESOURCES)) {
3302 			spin_unlock_irqrestore(&phba->hbalock, iflag);
3303 			phba->lpfc_rampdown_queue_depth(phba);
3304 			spin_lock_irqsave(&phba->hbalock, iflag);
3305 		}
3306 
3307 		if (irsp->ulpStatus) {
3308 			/* Rsp ring <ringno> error: IOCB */
3309 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3310 					"0328 Rsp Ring %d error: "
3311 					"IOCB Data: "
3312 					"x%x x%x x%x x%x "
3313 					"x%x x%x x%x x%x "
3314 					"x%x x%x x%x x%x "
3315 					"x%x x%x x%x x%x\n",
3316 					pring->ringno,
3317 					irsp->un.ulpWord[0],
3318 					irsp->un.ulpWord[1],
3319 					irsp->un.ulpWord[2],
3320 					irsp->un.ulpWord[3],
3321 					irsp->un.ulpWord[4],
3322 					irsp->un.ulpWord[5],
3323 					*(((uint32_t *) irsp) + 6),
3324 					*(((uint32_t *) irsp) + 7),
3325 					*(((uint32_t *) irsp) + 8),
3326 					*(((uint32_t *) irsp) + 9),
3327 					*(((uint32_t *) irsp) + 10),
3328 					*(((uint32_t *) irsp) + 11),
3329 					*(((uint32_t *) irsp) + 12),
3330 					*(((uint32_t *) irsp) + 13),
3331 					*(((uint32_t *) irsp) + 14),
3332 					*(((uint32_t *) irsp) + 15));
3333 		}
3334 
3335 		/*
3336 		 * Fetch the IOCB command type and call the correct completion
3337 		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3338 		 * get freed back to the lpfc_iocb_list by the discovery
3339 		 * kernel thread.
3340 		 */
3341 		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
3342 		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
3343 		switch (type) {
3344 		case LPFC_SOL_IOCB:
3345 			spin_unlock_irqrestore(&phba->hbalock, iflag);
3346 			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
3347 			spin_lock_irqsave(&phba->hbalock, iflag);
3348 			break;
3349 
3350 		case LPFC_UNSOL_IOCB:
3351 			spin_unlock_irqrestore(&phba->hbalock, iflag);
3352 			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
3353 			spin_lock_irqsave(&phba->hbalock, iflag);
3354 			if (!rc)
3355 				free_saveq = 0;
3356 			break;
3357 
3358 		case LPFC_ABORT_IOCB:
3359 			cmdiocbp = NULL;
3360 			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
3361 				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
3362 								 saveq);
3363 			if (cmdiocbp) {
3364 				/* Call the specified completion routine */
3365 				if (cmdiocbp->iocb_cmpl) {
3366 					spin_unlock_irqrestore(&phba->hbalock,
3367 							       iflag);
3368 					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
3369 							      saveq);
3370 					spin_lock_irqsave(&phba->hbalock,
3371 							  iflag);
3372 				} else
3373 					__lpfc_sli_release_iocbq(phba,
3374 								 cmdiocbp);
3375 			}
3376 			break;
3377 
3378 		case LPFC_UNKNOWN_IOCB:
3379 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
3380 				char adaptermsg[LPFC_MAX_ADPTMSG];
3381 				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
3382 				memcpy(&adaptermsg[0], (uint8_t *)irsp,
3383 				       MAX_MSG_DATA);
3384 				dev_warn(&((phba->pcidev)->dev),
3385 					 "lpfc%d: %s\n",
3386 					 phba->brd_no, adaptermsg);
3387 			} else {
3388 				/* Unknown IOCB command */
3389 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3390 						"0335 Unknown IOCB "
3391 						"command Data: x%x "
3392 						"x%x x%x x%x\n",
3393 						irsp->ulpCommand,
3394 						irsp->ulpStatus,
3395 						irsp->ulpIoTag,
3396 						irsp->ulpContext);
3397 			}
3398 			break;
3399 		}
3400 
3401 		if (free_saveq) {
3402 			list_for_each_entry_safe(rspiocbp, next_iocb,
3403 						 &saveq->list, list) {
3404 				list_del_init(&rspiocbp->list);
3405 				__lpfc_sli_release_iocbq(phba, rspiocbp);
3406 			}
3407 			__lpfc_sli_release_iocbq(phba, saveq);
3408 		}
3409 		rspiocbp = NULL;
3410 	}
3411 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3412 	return rspiocbp;
3413 }
3414 
3415 /**
3416  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
3417  * @phba: Pointer to HBA context object.
3418  * @pring: Pointer to driver SLI ring object.
3419  * @mask: Host attention register mask for this ring.
3420  *
3421  * This routine wraps the actual slow_ring event process routine from the
3422  * API jump table function pointer from the lpfc_hba struct.
3423  **/
3424 void
3425 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
3426 				struct lpfc_sli_ring *pring, uint32_t mask)
3427 {
3428 	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
3429 }
3430 
3431 /**
3432  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
3433  * @phba: Pointer to HBA context object.
3434  * @pring: Pointer to driver SLI ring object.
3435  * @mask: Host attention register mask for this ring.
3436  *
3437  * This function is called from the worker thread when there is a ring event
3438  * for non-fcp rings. The caller does not hold any lock. The function will
3439  * remove each response iocb in the response ring and calls the handle
3440  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3441  **/
3442 static void
3443 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
3444 				   struct lpfc_sli_ring *pring, uint32_t mask)
3445 {
3446 	struct lpfc_pgp *pgp;
3447 	IOCB_t *entry;
3448 	IOCB_t *irsp = NULL;
3449 	struct lpfc_iocbq *rspiocbp = NULL;
3450 	uint32_t portRspPut, portRspMax;
3451 	unsigned long iflag;
3452 	uint32_t status;
3453 
3454 	pgp = &phba->port_gp[pring->ringno];
3455 	spin_lock_irqsave(&phba->hbalock, iflag);
3456 	pring->stats.iocb_event++;
3457 
3458 	/*
3459 	 * The next available response entry should never exceed the maximum
3460 	 * entries.  If it does, treat it as an adapter hardware error.
3461 	 */
3462 	portRspMax = pring->sli.sli3.numRiocb;
3463 	portRspPut = le32_to_cpu(pgp->rspPutInx);
3464 	if (portRspPut >= portRspMax) {
3465 		/*
3466 		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3467 		 * rsp ring <portRspMax>
3468 		 */
3469 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3470 				"0303 Ring %d handler: portRspPut %d "
3471 				"is bigger than rsp ring %d\n",
3472 				pring->ringno, portRspPut, portRspMax);
3473 
3474 		phba->link_state = LPFC_HBA_ERROR;
3475 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3476 
3477 		phba->work_hs = HS_FFER3;
3478 		lpfc_handle_eratt(phba);
3479 
3480 		return;
3481 	}
3482 
3483 	rmb();
3484 	while (pring->sli.sli3.rspidx != portRspPut) {
3485 		/*
3486 		 * Build a completion list and call the appropriate handler.
3487 		 * The process is to get the next available response iocb, get
3488 		 * a free iocb from the list, copy the response data into the
3489 		 * free iocb, insert to the continuation list, and update the
3490 		 * next response index to slim.  This process makes response
3491 		 * iocb's in the ring available to DMA as fast as possible but
3492 		 * pays a penalty for a copy operation.  Since the iocb is
3493 		 * only 32 bytes, this penalty is considered small relative to
3494 		 * the PCI reads for register values and a slim write.  When
3495 		 * the ulpLe field is set, the entire Command has been
3496 		 * received.
3497 		 */
3498 		entry = lpfc_resp_iocb(phba, pring);
3499 
3500 		phba->last_completion_time = jiffies;
3501 		rspiocbp = __lpfc_sli_get_iocbq(phba);
3502 		if (rspiocbp == NULL) {
3503 			printk(KERN_ERR "%s: out of buffers! Failing "
3504 			       "completion.\n", __func__);
3505 			break;
3506 		}
3507 
3508 		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
3509 				      phba->iocb_rsp_size);
3510 		irsp = &rspiocbp->iocb;
3511 
3512 		if (++pring->sli.sli3.rspidx >= portRspMax)
3513 			pring->sli.sli3.rspidx = 0;
3514 
3515 		if (pring->ringno == LPFC_ELS_RING) {
3516 			lpfc_debugfs_slow_ring_trc(phba,
3517 			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
3518 				*(((uint32_t *) irsp) + 4),
3519 				*(((uint32_t *) irsp) + 6),
3520 				*(((uint32_t *) irsp) + 7));
3521 		}
3522 
3523 		writel(pring->sli.sli3.rspidx,
3524 			&phba->host_gp[pring->ringno].rspGetInx);
3525 
3526 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3527 		/* Handle the response IOCB */
3528 		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
3529 		spin_lock_irqsave(&phba->hbalock, iflag);
3530 
3531 		/*
3532 		 * If the port response put pointer has not been updated, sync
3533 		 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
3534 		 * response put pointer.
3535 		 */
3536 		if (pring->sli.sli3.rspidx == portRspPut) {
3537 			portRspPut = le32_to_cpu(pgp->rspPutInx);
3538 		}
3539 	} /* while (pring->sli.sli3.rspidx != portRspPut) */
3540 
3541 	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
3542 		/* At least one response entry has been freed */
3543 		pring->stats.iocb_rsp_full++;
3544 		/* SET RxRE_RSP in Chip Att register */
3545 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
3546 		writel(status, phba->CAregaddr);
3547 		readl(phba->CAregaddr); /* flush */
3548 	}
3549 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
3550 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
3551 		pring->stats.iocb_cmd_empty++;
3552 
3553 		/* Force update of the local copy of cmdGetInx */
3554 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
3555 		lpfc_sli_resume_iocb(phba, pring);
3556 
3557 		if ((pring->lpfc_sli_cmd_available))
3558 			(pring->lpfc_sli_cmd_available) (phba, pring);
3559 
3560 	}
3561 
3562 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3563 	return;
3564 }
3565 
3566 /**
3567  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
3568  * @phba: Pointer to HBA context object.
3569  * @pring: Pointer to driver SLI ring object.
3570  * @mask: Host attention register mask for this ring.
3571  *
3572  * This function is called from the worker thread when there is a pending
3573  * ELS response iocb on the driver internal slow-path response iocb worker
3574  * queue. The caller does not hold any lock. The function will remove each
3575  * response iocb from the response worker queue and calls the handle
3576  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
3577  **/
3578 static void
3579 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
3580 				   struct lpfc_sli_ring *pring, uint32_t mask)
3581 {
3582 	struct lpfc_iocbq *irspiocbq;
3583 	struct hbq_dmabuf *dmabuf;
3584 	struct lpfc_cq_event *cq_event;
3585 	unsigned long iflag;
3586 
3587 	spin_lock_irqsave(&phba->hbalock, iflag);
3588 	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
3589 	spin_unlock_irqrestore(&phba->hbalock, iflag);
3590 	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
3591 		/* Get the response iocb from the head of work queue */
3592 		spin_lock_irqsave(&phba->hbalock, iflag);
3593 		list_remove_head(&phba->sli4_hba.sp_queue_event,
3594 				 cq_event, struct lpfc_cq_event, list);
3595 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3596 
3597 		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
3598 		case CQE_CODE_COMPL_WQE:
3599 			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
3600 						 cq_event);
3601 			/* Translate ELS WCQE to response IOCBQ */
3602 			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
3603 								   irspiocbq);
3604 			if (irspiocbq)
3605 				lpfc_sli_sp_handle_rspiocb(phba, pring,
3606 							   irspiocbq);
3607 			break;
3608 		case CQE_CODE_RECEIVE:
3609 		case CQE_CODE_RECEIVE_V1:
3610 			dmabuf = container_of(cq_event, struct hbq_dmabuf,
3611 					      cq_event);
3612 			lpfc_sli4_handle_received_buffer(phba, dmabuf);
3613 			break;
3614 		default:
3615 			break;
3616 		}
3617 	}
3618 }
3619 
3620 /**
3621  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
3622  * @phba: Pointer to HBA context object.
3623  * @pring: Pointer to driver SLI ring object.
3624  *
3625  * This function aborts all iocbs in the given ring and frees all the iocb
3626  * objects in txq. This function issues an abort iocb for all the iocb commands
3627  * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3628  * the return of this function. The caller is not required to hold any locks.
3629  **/
3630 void
3631 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3632 {
3633 	LIST_HEAD(completions);
3634 	struct lpfc_iocbq *iocb, *next_iocb;
3635 
3636 	if (pring->ringno == LPFC_ELS_RING) {
3637 		lpfc_fabric_abort_hba(phba);
3638 	}
3639 
3640 	/* Error everything on txq and txcmplq
3641 	 * First do the txq.
3642 	 */
3643 	if (phba->sli_rev >= LPFC_SLI_REV4) {
3644 		spin_lock_irq(&pring->ring_lock);
3645 		list_splice_init(&pring->txq, &completions);
3646 		pring->txq_cnt = 0;
3647 		spin_unlock_irq(&pring->ring_lock);
3648 
3649 		spin_lock_irq(&phba->hbalock);
3650 		/* Next issue ABTS for everything on the txcmplq */
3651 		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3652 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3653 		spin_unlock_irq(&phba->hbalock);
3654 	} else {
3655 		spin_lock_irq(&phba->hbalock);
3656 		list_splice_init(&pring->txq, &completions);
3657 		pring->txq_cnt = 0;
3658 
3659 		/* Next issue ABTS for everything on the txcmplq */
3660 		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3661 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3662 		spin_unlock_irq(&phba->hbalock);
3663 	}
3664 
3665 	/* Cancel all the IOCBs from the completions list */
3666 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
3667 			      IOERR_SLI_ABORTED);
3668 }
3669 
3670 /**
3671  * lpfc_sli_abort_wqe_ring - Abort all iocbs in the ring
3672  * @phba: Pointer to HBA context object.
3673  * @pring: Pointer to driver SLI ring object.
3674  *
3675  * This function aborts all iocbs in the given ring and frees all the iocb
3676  * objects in txq. This function issues an abort iocb for all the iocb commands
3677  * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3678  * the return of this function. The caller is not required to hold any locks.
3679  **/
3680 void
3681 lpfc_sli_abort_wqe_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3682 {
3683 	LIST_HEAD(completions);
3684 	struct lpfc_iocbq *iocb, *next_iocb;
3685 
3686 	if (pring->ringno == LPFC_ELS_RING)
3687 		lpfc_fabric_abort_hba(phba);
3688 
3689 	spin_lock_irq(&phba->hbalock);
3690 	/* Next issue ABTS for everything on the txcmplq */
3691 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
3692 		lpfc_sli4_abort_nvme_io(phba, pring, iocb);
3693 	spin_unlock_irq(&phba->hbalock);
3694 }
3695 
3696 
3697 /**
3698  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
3699  * @phba: Pointer to HBA context object.
3700  * @pring: Pointer to driver SLI ring object.
3701  *
3702  * This function aborts all iocbs in FCP rings and frees all the iocb
3703  * objects in txq. This function issues an abort iocb for all the iocb commands
3704  * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
3705  * the return of this function. The caller is not required to hold any locks.
3706  **/
3707 void
3708 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
3709 {
3710 	struct lpfc_sli *psli = &phba->sli;
3711 	struct lpfc_sli_ring  *pring;
3712 	uint32_t i;
3713 
3714 	/* Look on all the FCP Rings for the iotag */
3715 	if (phba->sli_rev >= LPFC_SLI_REV4) {
3716 		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3717 			pring = phba->sli4_hba.fcp_wq[i]->pring;
3718 			lpfc_sli_abort_iocb_ring(phba, pring);
3719 		}
3720 	} else {
3721 		pring = &psli->sli3_ring[LPFC_FCP_RING];
3722 		lpfc_sli_abort_iocb_ring(phba, pring);
3723 	}
3724 }
3725 
3726 /**
3727  * lpfc_sli_abort_nvme_rings - Abort all wqes in all NVME rings
3728  * @phba: Pointer to HBA context object.
3729  *
3730  * This function aborts all wqes in NVME rings. This function issues an
3731  * abort wqe for all the outstanding IO commands in txcmplq. The iocbs in
3732  * the txcmplq is not guaranteed to complete before the return of this
3733  * function. The caller is not required to hold any locks.
3734  **/
3735 void
3736 lpfc_sli_abort_nvme_rings(struct lpfc_hba *phba)
3737 {
3738 	struct lpfc_sli_ring  *pring;
3739 	uint32_t i;
3740 
3741 	if (phba->sli_rev < LPFC_SLI_REV4)
3742 		return;
3743 
3744 	/* Abort all IO on each NVME ring. */
3745 	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3746 		pring = phba->sli4_hba.nvme_wq[i]->pring;
3747 		lpfc_sli_abort_wqe_ring(phba, pring);
3748 	}
3749 }
3750 
3751 
3752 /**
3753  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
3754  * @phba: Pointer to HBA context object.
3755  *
3756  * This function flushes all iocbs in the fcp ring and frees all the iocb
3757  * objects in txq and txcmplq. This function will not issue abort iocbs
3758  * for all the iocb commands in txcmplq, they will just be returned with
3759  * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3760  * slot has been permanently disabled.
3761  **/
3762 void
3763 lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
3764 {
3765 	LIST_HEAD(txq);
3766 	LIST_HEAD(txcmplq);
3767 	struct lpfc_sli *psli = &phba->sli;
3768 	struct lpfc_sli_ring  *pring;
3769 	uint32_t i;
3770 
3771 	spin_lock_irq(&phba->hbalock);
3772 	/* Indicate the I/O queues are flushed */
3773 	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
3774 	spin_unlock_irq(&phba->hbalock);
3775 
3776 	/* Look on all the FCP Rings for the iotag */
3777 	if (phba->sli_rev >= LPFC_SLI_REV4) {
3778 		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
3779 			pring = phba->sli4_hba.fcp_wq[i]->pring;
3780 
3781 			spin_lock_irq(&pring->ring_lock);
3782 			/* Retrieve everything on txq */
3783 			list_splice_init(&pring->txq, &txq);
3784 			/* Retrieve everything on the txcmplq */
3785 			list_splice_init(&pring->txcmplq, &txcmplq);
3786 			pring->txq_cnt = 0;
3787 			pring->txcmplq_cnt = 0;
3788 			spin_unlock_irq(&pring->ring_lock);
3789 
3790 			/* Flush the txq */
3791 			lpfc_sli_cancel_iocbs(phba, &txq,
3792 					      IOSTAT_LOCAL_REJECT,
3793 					      IOERR_SLI_DOWN);
3794 			/* Flush the txcmpq */
3795 			lpfc_sli_cancel_iocbs(phba, &txcmplq,
3796 					      IOSTAT_LOCAL_REJECT,
3797 					      IOERR_SLI_DOWN);
3798 		}
3799 	} else {
3800 		pring = &psli->sli3_ring[LPFC_FCP_RING];
3801 
3802 		spin_lock_irq(&phba->hbalock);
3803 		/* Retrieve everything on txq */
3804 		list_splice_init(&pring->txq, &txq);
3805 		/* Retrieve everything on the txcmplq */
3806 		list_splice_init(&pring->txcmplq, &txcmplq);
3807 		pring->txq_cnt = 0;
3808 		pring->txcmplq_cnt = 0;
3809 		spin_unlock_irq(&phba->hbalock);
3810 
3811 		/* Flush the txq */
3812 		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
3813 				      IOERR_SLI_DOWN);
3814 		/* Flush the txcmpq */
3815 		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
3816 				      IOERR_SLI_DOWN);
3817 	}
3818 }
3819 
3820 /**
3821  * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings
3822  * @phba: Pointer to HBA context object.
3823  *
3824  * This function flushes all wqes in the nvme rings and frees all resources
3825  * in the txcmplq. This function does not issue abort wqes for the IO
3826  * commands in txcmplq, they will just be returned with
3827  * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
3828  * slot has been permanently disabled.
3829  **/
3830 void
3831 lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba)
3832 {
3833 	LIST_HEAD(txcmplq);
3834 	struct lpfc_sli_ring  *pring;
3835 	uint32_t i;
3836 
3837 	if (phba->sli_rev < LPFC_SLI_REV4)
3838 		return;
3839 
3840 	/* Hint to other driver operations that a flush is in progress. */
3841 	spin_lock_irq(&phba->hbalock);
3842 	phba->hba_flag |= HBA_NVME_IOQ_FLUSH;
3843 	spin_unlock_irq(&phba->hbalock);
3844 
3845 	/* Cycle through all NVME rings and complete each IO with
3846 	 * a local driver reason code.  This is a flush so no
3847 	 * abort exchange to FW.
3848 	 */
3849 	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
3850 		pring = phba->sli4_hba.nvme_wq[i]->pring;
3851 
3852 		/* Retrieve everything on the txcmplq */
3853 		spin_lock_irq(&pring->ring_lock);
3854 		list_splice_init(&pring->txcmplq, &txcmplq);
3855 		pring->txcmplq_cnt = 0;
3856 		spin_unlock_irq(&pring->ring_lock);
3857 
3858 		/* Flush the txcmpq &&&PAE */
3859 		lpfc_sli_cancel_iocbs(phba, &txcmplq,
3860 				      IOSTAT_LOCAL_REJECT,
3861 				      IOERR_SLI_DOWN);
3862 	}
3863 }
3864 
3865 /**
3866  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
3867  * @phba: Pointer to HBA context object.
3868  * @mask: Bit mask to be checked.
3869  *
3870  * This function reads the host status register and compares
3871  * with the provided bit mask to check if HBA completed
3872  * the restart. This function will wait in a loop for the
3873  * HBA to complete restart. If the HBA does not restart within
3874  * 15 iterations, the function will reset the HBA again. The
3875  * function returns 1 when HBA fail to restart otherwise returns
3876  * zero.
3877  **/
3878 static int
3879 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
3880 {
3881 	uint32_t status;
3882 	int i = 0;
3883 	int retval = 0;
3884 
3885 	/* Read the HBA Host Status Register */
3886 	if (lpfc_readl(phba->HSregaddr, &status))
3887 		return 1;
3888 
3889 	/*
3890 	 * Check status register every 100ms for 5 retries, then every
3891 	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3892 	 * every 2.5 sec for 4.
3893 	 * Break our of the loop if errors occurred during init.
3894 	 */
3895 	while (((status & mask) != mask) &&
3896 	       !(status & HS_FFERM) &&
3897 	       i++ < 20) {
3898 
3899 		if (i <= 5)
3900 			msleep(10);
3901 		else if (i <= 10)
3902 			msleep(500);
3903 		else
3904 			msleep(2500);
3905 
3906 		if (i == 15) {
3907 				/* Do post */
3908 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3909 			lpfc_sli_brdrestart(phba);
3910 		}
3911 		/* Read the HBA Host Status Register */
3912 		if (lpfc_readl(phba->HSregaddr, &status)) {
3913 			retval = 1;
3914 			break;
3915 		}
3916 	}
3917 
3918 	/* Check to see if any errors occurred during init */
3919 	if ((status & HS_FFERM) || (i >= 20)) {
3920 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3921 				"2751 Adapter failed to restart, "
3922 				"status reg x%x, FW Data: A8 x%x AC x%x\n",
3923 				status,
3924 				readl(phba->MBslimaddr + 0xa8),
3925 				readl(phba->MBslimaddr + 0xac));
3926 		phba->link_state = LPFC_HBA_ERROR;
3927 		retval = 1;
3928 	}
3929 
3930 	return retval;
3931 }
3932 
3933 /**
3934  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
3935  * @phba: Pointer to HBA context object.
3936  * @mask: Bit mask to be checked.
3937  *
3938  * This function checks the host status register to check if HBA is
3939  * ready. This function will wait in a loop for the HBA to be ready
3940  * If the HBA is not ready , the function will will reset the HBA PCI
3941  * function again. The function returns 1 when HBA fail to be ready
3942  * otherwise returns zero.
3943  **/
3944 static int
3945 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
3946 {
3947 	uint32_t status;
3948 	int retval = 0;
3949 
3950 	/* Read the HBA Host Status Register */
3951 	status = lpfc_sli4_post_status_check(phba);
3952 
3953 	if (status) {
3954 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
3955 		lpfc_sli_brdrestart(phba);
3956 		status = lpfc_sli4_post_status_check(phba);
3957 	}
3958 
3959 	/* Check to see if any errors occurred during init */
3960 	if (status) {
3961 		phba->link_state = LPFC_HBA_ERROR;
3962 		retval = 1;
3963 	} else
3964 		phba->sli4_hba.intr_enable = 0;
3965 
3966 	return retval;
3967 }
3968 
3969 /**
3970  * lpfc_sli_brdready - Wrapper func for checking the hba readyness
3971  * @phba: Pointer to HBA context object.
3972  * @mask: Bit mask to be checked.
3973  *
3974  * This routine wraps the actual SLI3 or SLI4 hba readyness check routine
3975  * from the API jump table function pointer from the lpfc_hba struct.
3976  **/
3977 int
3978 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
3979 {
3980 	return phba->lpfc_sli_brdready(phba, mask);
3981 }
3982 
3983 #define BARRIER_TEST_PATTERN (0xdeadbeef)
3984 
3985 /**
3986  * lpfc_reset_barrier - Make HBA ready for HBA reset
3987  * @phba: Pointer to HBA context object.
3988  *
3989  * This function is called before resetting an HBA. This function is called
3990  * with hbalock held and requests HBA to quiesce DMAs before a reset.
3991  **/
3992 void lpfc_reset_barrier(struct lpfc_hba *phba)
3993 {
3994 	uint32_t __iomem *resp_buf;
3995 	uint32_t __iomem *mbox_buf;
3996 	volatile uint32_t mbox;
3997 	uint32_t hc_copy, ha_copy, resp_data;
3998 	int  i;
3999 	uint8_t hdrtype;
4000 
4001 	lockdep_assert_held(&phba->hbalock);
4002 
4003 	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4004 	if (hdrtype != 0x80 ||
4005 	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4006 	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4007 		return;
4008 
4009 	/*
4010 	 * Tell the other part of the chip to suspend temporarily all
4011 	 * its DMA activity.
4012 	 */
4013 	resp_buf = phba->MBslimaddr;
4014 
4015 	/* Disable the error attention */
4016 	if (lpfc_readl(phba->HCregaddr, &hc_copy))
4017 		return;
4018 	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4019 	readl(phba->HCregaddr); /* flush */
4020 	phba->link_flag |= LS_IGNORE_ERATT;
4021 
4022 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
4023 		return;
4024 	if (ha_copy & HA_ERATT) {
4025 		/* Clear Chip error bit */
4026 		writel(HA_ERATT, phba->HAregaddr);
4027 		phba->pport->stopped = 1;
4028 	}
4029 
4030 	mbox = 0;
4031 	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
4032 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
4033 
4034 	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4035 	mbox_buf = phba->MBslimaddr;
4036 	writel(mbox, mbox_buf);
4037 
4038 	for (i = 0; i < 50; i++) {
4039 		if (lpfc_readl((resp_buf + 1), &resp_data))
4040 			return;
4041 		if (resp_data != ~(BARRIER_TEST_PATTERN))
4042 			mdelay(1);
4043 		else
4044 			break;
4045 	}
4046 	resp_data = 0;
4047 	if (lpfc_readl((resp_buf + 1), &resp_data))
4048 		return;
4049 	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4050 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4051 		    phba->pport->stopped)
4052 			goto restore_hc;
4053 		else
4054 			goto clear_errat;
4055 	}
4056 
4057 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
4058 	resp_data = 0;
4059 	for (i = 0; i < 500; i++) {
4060 		if (lpfc_readl(resp_buf, &resp_data))
4061 			return;
4062 		if (resp_data != mbox)
4063 			mdelay(1);
4064 		else
4065 			break;
4066 	}
4067 
4068 clear_errat:
4069 
4070 	while (++i < 500) {
4071 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
4072 			return;
4073 		if (!(ha_copy & HA_ERATT))
4074 			mdelay(1);
4075 		else
4076 			break;
4077 	}
4078 
4079 	if (readl(phba->HAregaddr) & HA_ERATT) {
4080 		writel(HA_ERATT, phba->HAregaddr);
4081 		phba->pport->stopped = 1;
4082 	}
4083 
4084 restore_hc:
4085 	phba->link_flag &= ~LS_IGNORE_ERATT;
4086 	writel(hc_copy, phba->HCregaddr);
4087 	readl(phba->HCregaddr); /* flush */
4088 }
4089 
4090 /**
4091  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4092  * @phba: Pointer to HBA context object.
4093  *
4094  * This function issues a kill_board mailbox command and waits for
4095  * the error attention interrupt. This function is called for stopping
4096  * the firmware processing. The caller is not required to hold any
4097  * locks. This function calls lpfc_hba_down_post function to free
4098  * any pending commands after the kill. The function will return 1 when it
4099  * fails to kill the board else will return 0.
4100  **/
4101 int
4102 lpfc_sli_brdkill(struct lpfc_hba *phba)
4103 {
4104 	struct lpfc_sli *psli;
4105 	LPFC_MBOXQ_t *pmb;
4106 	uint32_t status;
4107 	uint32_t ha_copy;
4108 	int retval;
4109 	int i = 0;
4110 
4111 	psli = &phba->sli;
4112 
4113 	/* Kill HBA */
4114 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4115 			"0329 Kill HBA Data: x%x x%x\n",
4116 			phba->pport->port_state, psli->sli_flag);
4117 
4118 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4119 	if (!pmb)
4120 		return 1;
4121 
4122 	/* Disable the error attention */
4123 	spin_lock_irq(&phba->hbalock);
4124 	if (lpfc_readl(phba->HCregaddr, &status)) {
4125 		spin_unlock_irq(&phba->hbalock);
4126 		mempool_free(pmb, phba->mbox_mem_pool);
4127 		return 1;
4128 	}
4129 	status &= ~HC_ERINT_ENA;
4130 	writel(status, phba->HCregaddr);
4131 	readl(phba->HCregaddr); /* flush */
4132 	phba->link_flag |= LS_IGNORE_ERATT;
4133 	spin_unlock_irq(&phba->hbalock);
4134 
4135 	lpfc_kill_board(phba, pmb);
4136 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4137 	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4138 
4139 	if (retval != MBX_SUCCESS) {
4140 		if (retval != MBX_BUSY)
4141 			mempool_free(pmb, phba->mbox_mem_pool);
4142 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4143 				"2752 KILL_BOARD command failed retval %d\n",
4144 				retval);
4145 		spin_lock_irq(&phba->hbalock);
4146 		phba->link_flag &= ~LS_IGNORE_ERATT;
4147 		spin_unlock_irq(&phba->hbalock);
4148 		return 1;
4149 	}
4150 
4151 	spin_lock_irq(&phba->hbalock);
4152 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
4153 	spin_unlock_irq(&phba->hbalock);
4154 
4155 	mempool_free(pmb, phba->mbox_mem_pool);
4156 
4157 	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4158 	 * attention every 100ms for 3 seconds. If we don't get ERATT after
4159 	 * 3 seconds we still set HBA_ERROR state because the status of the
4160 	 * board is now undefined.
4161 	 */
4162 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
4163 		return 1;
4164 	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
4165 		mdelay(100);
4166 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
4167 			return 1;
4168 	}
4169 
4170 	del_timer_sync(&psli->mbox_tmo);
4171 	if (ha_copy & HA_ERATT) {
4172 		writel(HA_ERATT, phba->HAregaddr);
4173 		phba->pport->stopped = 1;
4174 	}
4175 	spin_lock_irq(&phba->hbalock);
4176 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4177 	psli->mbox_active = NULL;
4178 	phba->link_flag &= ~LS_IGNORE_ERATT;
4179 	spin_unlock_irq(&phba->hbalock);
4180 
4181 	lpfc_hba_down_post(phba);
4182 	phba->link_state = LPFC_HBA_ERROR;
4183 
4184 	return ha_copy & HA_ERATT ? 0 : 1;
4185 }
4186 
4187 /**
4188  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4189  * @phba: Pointer to HBA context object.
4190  *
4191  * This function resets the HBA by writing HC_INITFF to the control
4192  * register. After the HBA resets, this function resets all the iocb ring
4193  * indices. This function disables PCI layer parity checking during
4194  * the reset.
4195  * This function returns 0 always.
4196  * The caller is not required to hold any locks.
4197  **/
4198 int
4199 lpfc_sli_brdreset(struct lpfc_hba *phba)
4200 {
4201 	struct lpfc_sli *psli;
4202 	struct lpfc_sli_ring *pring;
4203 	uint16_t cfg_value;
4204 	int i;
4205 
4206 	psli = &phba->sli;
4207 
4208 	/* Reset HBA */
4209 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4210 			"0325 Reset HBA Data: x%x x%x\n",
4211 			(phba->pport) ? phba->pport->port_state : 0,
4212 			psli->sli_flag);
4213 
4214 	/* perform board reset */
4215 	phba->fc_eventTag = 0;
4216 	phba->link_events = 0;
4217 	if (phba->pport) {
4218 		phba->pport->fc_myDID = 0;
4219 		phba->pport->fc_prevDID = 0;
4220 	}
4221 
4222 	/* Turn off parity checking and serr during the physical reset */
4223 	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4224 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
4225 			      (cfg_value &
4226 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4227 
4228 	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
4229 
4230 	/* Now toggle INITFF bit in the Host Control Register */
4231 	writel(HC_INITFF, phba->HCregaddr);
4232 	mdelay(1);
4233 	readl(phba->HCregaddr); /* flush */
4234 	writel(0, phba->HCregaddr);
4235 	readl(phba->HCregaddr); /* flush */
4236 
4237 	/* Restore PCI cmd register */
4238 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4239 
4240 	/* Initialize relevant SLI info */
4241 	for (i = 0; i < psli->num_rings; i++) {
4242 		pring = &psli->sli3_ring[i];
4243 		pring->flag = 0;
4244 		pring->sli.sli3.rspidx = 0;
4245 		pring->sli.sli3.next_cmdidx  = 0;
4246 		pring->sli.sli3.local_getidx = 0;
4247 		pring->sli.sli3.cmdidx = 0;
4248 		pring->missbufcnt = 0;
4249 	}
4250 
4251 	phba->link_state = LPFC_WARM_START;
4252 	return 0;
4253 }
4254 
4255 /**
4256  * lpfc_sli4_brdreset - Reset a sli-4 HBA
4257  * @phba: Pointer to HBA context object.
4258  *
4259  * This function resets a SLI4 HBA. This function disables PCI layer parity
4260  * checking during resets the device. The caller is not required to hold
4261  * any locks.
4262  *
4263  * This function returns 0 always.
4264  **/
4265 int
4266 lpfc_sli4_brdreset(struct lpfc_hba *phba)
4267 {
4268 	struct lpfc_sli *psli = &phba->sli;
4269 	uint16_t cfg_value;
4270 	int rc = 0;
4271 
4272 	/* Reset HBA */
4273 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4274 			"0295 Reset HBA Data: x%x x%x x%x\n",
4275 			phba->pport->port_state, psli->sli_flag,
4276 			phba->hba_flag);
4277 
4278 	/* perform board reset */
4279 	phba->fc_eventTag = 0;
4280 	phba->link_events = 0;
4281 	phba->pport->fc_myDID = 0;
4282 	phba->pport->fc_prevDID = 0;
4283 
4284 	spin_lock_irq(&phba->hbalock);
4285 	psli->sli_flag &= ~(LPFC_PROCESS_LA);
4286 	phba->fcf.fcf_flag = 0;
4287 	spin_unlock_irq(&phba->hbalock);
4288 
4289 	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4290 	if (phba->hba_flag & HBA_FW_DUMP_OP) {
4291 		phba->hba_flag &= ~HBA_FW_DUMP_OP;
4292 		return rc;
4293 	}
4294 
4295 	/* Now physically reset the device */
4296 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4297 			"0389 Performing PCI function reset!\n");
4298 
4299 	/* Turn off parity checking and serr during the physical reset */
4300 	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
4301 	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
4302 			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
4303 
4304 	/* Perform FCoE PCI function reset before freeing queue memory */
4305 	rc = lpfc_pci_function_reset(phba);
4306 
4307 	/* Restore PCI cmd register */
4308 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
4309 
4310 	return rc;
4311 }
4312 
4313 /**
4314  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4315  * @phba: Pointer to HBA context object.
4316  *
4317  * This function is called in the SLI initialization code path to
4318  * restart the HBA. The caller is not required to hold any lock.
4319  * This function writes MBX_RESTART mailbox command to the SLIM and
4320  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4321  * function to free any pending commands. The function enables
4322  * POST only during the first initialization. The function returns zero.
4323  * The function does not guarantee completion of MBX_RESTART mailbox
4324  * command before the return of this function.
4325  **/
4326 static int
4327 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
4328 {
4329 	MAILBOX_t *mb;
4330 	struct lpfc_sli *psli;
4331 	volatile uint32_t word0;
4332 	void __iomem *to_slim;
4333 	uint32_t hba_aer_enabled;
4334 
4335 	spin_lock_irq(&phba->hbalock);
4336 
4337 	/* Take PCIe device Advanced Error Reporting (AER) state */
4338 	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4339 
4340 	psli = &phba->sli;
4341 
4342 	/* Restart HBA */
4343 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4344 			"0337 Restart HBA Data: x%x x%x\n",
4345 			(phba->pport) ? phba->pport->port_state : 0,
4346 			psli->sli_flag);
4347 
4348 	word0 = 0;
4349 	mb = (MAILBOX_t *) &word0;
4350 	mb->mbxCommand = MBX_RESTART;
4351 	mb->mbxHc = 1;
4352 
4353 	lpfc_reset_barrier(phba);
4354 
4355 	to_slim = phba->MBslimaddr;
4356 	writel(*(uint32_t *) mb, to_slim);
4357 	readl(to_slim); /* flush */
4358 
4359 	/* Only skip post after fc_ffinit is completed */
4360 	if (phba->pport && phba->pport->port_state)
4361 		word0 = 1;	/* This is really setting up word1 */
4362 	else
4363 		word0 = 0;	/* This is really setting up word1 */
4364 	to_slim = phba->MBslimaddr + sizeof (uint32_t);
4365 	writel(*(uint32_t *) mb, to_slim);
4366 	readl(to_slim); /* flush */
4367 
4368 	lpfc_sli_brdreset(phba);
4369 	if (phba->pport)
4370 		phba->pport->stopped = 0;
4371 	phba->link_state = LPFC_INIT_START;
4372 	phba->hba_flag = 0;
4373 	spin_unlock_irq(&phba->hbalock);
4374 
4375 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4376 	psli->stats_start = get_seconds();
4377 
4378 	/* Give the INITFF and Post time to settle. */
4379 	mdelay(100);
4380 
4381 	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
4382 	if (hba_aer_enabled)
4383 		pci_disable_pcie_error_reporting(phba->pcidev);
4384 
4385 	lpfc_hba_down_post(phba);
4386 
4387 	return 0;
4388 }
4389 
4390 /**
4391  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4392  * @phba: Pointer to HBA context object.
4393  *
4394  * This function is called in the SLI initialization code path to restart
4395  * a SLI4 HBA. The caller is not required to hold any lock.
4396  * At the end of the function, it calls lpfc_hba_down_post function to
4397  * free any pending commands.
4398  **/
4399 static int
4400 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
4401 {
4402 	struct lpfc_sli *psli = &phba->sli;
4403 	uint32_t hba_aer_enabled;
4404 	int rc;
4405 
4406 	/* Restart HBA */
4407 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4408 			"0296 Restart HBA Data: x%x x%x\n",
4409 			phba->pport->port_state, psli->sli_flag);
4410 
4411 	/* Take PCIe device Advanced Error Reporting (AER) state */
4412 	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;
4413 
4414 	rc = lpfc_sli4_brdreset(phba);
4415 
4416 	spin_lock_irq(&phba->hbalock);
4417 	phba->pport->stopped = 0;
4418 	phba->link_state = LPFC_INIT_START;
4419 	phba->hba_flag = 0;
4420 	spin_unlock_irq(&phba->hbalock);
4421 
4422 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
4423 	psli->stats_start = get_seconds();
4424 
4425 	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
4426 	if (hba_aer_enabled)
4427 		pci_disable_pcie_error_reporting(phba->pcidev);
4428 
4429 	lpfc_hba_down_post(phba);
4430 	lpfc_sli4_queue_destroy(phba);
4431 
4432 	return rc;
4433 }
4434 
4435 /**
4436  * lpfc_sli_brdrestart - Wrapper func for restarting hba
4437  * @phba: Pointer to HBA context object.
4438  *
4439  * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
4440  * API jump table function pointer from the lpfc_hba struct.
4441 **/
4442 int
4443 lpfc_sli_brdrestart(struct lpfc_hba *phba)
4444 {
4445 	return phba->lpfc_sli_brdrestart(phba);
4446 }
4447 
4448 /**
4449  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
4450  * @phba: Pointer to HBA context object.
4451  *
4452  * This function is called after a HBA restart to wait for successful
4453  * restart of the HBA. Successful restart of the HBA is indicated by
4454  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
4455  * iteration, the function will restart the HBA again. The function returns
4456  * zero if HBA successfully restarted else returns negative error code.
4457  **/
4458 int
4459 lpfc_sli_chipset_init(struct lpfc_hba *phba)
4460 {
4461 	uint32_t status, i = 0;
4462 
4463 	/* Read the HBA Host Status Register */
4464 	if (lpfc_readl(phba->HSregaddr, &status))
4465 		return -EIO;
4466 
4467 	/* Check status register to see what current state is */
4468 	i = 0;
4469 	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
4470 
4471 		/* Check every 10ms for 10 retries, then every 100ms for 90
4472 		 * retries, then every 1 sec for 50 retires for a total of
4473 		 * ~60 seconds before reset the board again and check every
4474 		 * 1 sec for 50 retries. The up to 60 seconds before the
4475 		 * board ready is required by the Falcon FIPS zeroization
4476 		 * complete, and any reset the board in between shall cause
4477 		 * restart of zeroization, further delay the board ready.
4478 		 */
4479 		if (i++ >= 200) {
4480 			/* Adapter failed to init, timeout, status reg
4481 			   <status> */
4482 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4483 					"0436 Adapter failed to init, "
4484 					"timeout, status reg x%x, "
4485 					"FW Data: A8 x%x AC x%x\n", status,
4486 					readl(phba->MBslimaddr + 0xa8),
4487 					readl(phba->MBslimaddr + 0xac));
4488 			phba->link_state = LPFC_HBA_ERROR;
4489 			return -ETIMEDOUT;
4490 		}
4491 
4492 		/* Check to see if any errors occurred during init */
4493 		if (status & HS_FFERM) {
4494 			/* ERROR: During chipset initialization */
4495 			/* Adapter failed to init, chipset, status reg
4496 			   <status> */
4497 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4498 					"0437 Adapter failed to init, "
4499 					"chipset, status reg x%x, "
4500 					"FW Data: A8 x%x AC x%x\n", status,
4501 					readl(phba->MBslimaddr + 0xa8),
4502 					readl(phba->MBslimaddr + 0xac));
4503 			phba->link_state = LPFC_HBA_ERROR;
4504 			return -EIO;
4505 		}
4506 
4507 		if (i <= 10)
4508 			msleep(10);
4509 		else if (i <= 100)
4510 			msleep(100);
4511 		else
4512 			msleep(1000);
4513 
4514 		if (i == 150) {
4515 			/* Do post */
4516 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4517 			lpfc_sli_brdrestart(phba);
4518 		}
4519 		/* Read the HBA Host Status Register */
4520 		if (lpfc_readl(phba->HSregaddr, &status))
4521 			return -EIO;
4522 	}
4523 
4524 	/* Check to see if any errors occurred during init */
4525 	if (status & HS_FFERM) {
4526 		/* ERROR: During chipset initialization */
4527 		/* Adapter failed to init, chipset, status reg <status> */
4528 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4529 				"0438 Adapter failed to init, chipset, "
4530 				"status reg x%x, "
4531 				"FW Data: A8 x%x AC x%x\n", status,
4532 				readl(phba->MBslimaddr + 0xa8),
4533 				readl(phba->MBslimaddr + 0xac));
4534 		phba->link_state = LPFC_HBA_ERROR;
4535 		return -EIO;
4536 	}
4537 
4538 	/* Clear all interrupt enable conditions */
4539 	writel(0, phba->HCregaddr);
4540 	readl(phba->HCregaddr); /* flush */
4541 
4542 	/* setup host attn register */
4543 	writel(0xffffffff, phba->HAregaddr);
4544 	readl(phba->HAregaddr); /* flush */
4545 	return 0;
4546 }
4547 
4548 /**
4549  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
4550  *
4551  * This function calculates and returns the number of HBQs required to be
4552  * configured.
4553  **/
4554 int
4555 lpfc_sli_hbq_count(void)
4556 {
4557 	return ARRAY_SIZE(lpfc_hbq_defs);
4558 }
4559 
4560 /**
4561  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
4562  *
4563  * This function adds the number of hbq entries in every HBQ to get
4564  * the total number of hbq entries required for the HBA and returns
4565  * the total count.
4566  **/
4567 static int
4568 lpfc_sli_hbq_entry_count(void)
4569 {
4570 	int  hbq_count = lpfc_sli_hbq_count();
4571 	int  count = 0;
4572 	int  i;
4573 
4574 	for (i = 0; i < hbq_count; ++i)
4575 		count += lpfc_hbq_defs[i]->entry_count;
4576 	return count;
4577 }
4578 
4579 /**
4580  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
4581  *
4582  * This function calculates amount of memory required for all hbq entries
4583  * to be configured and returns the total memory required.
4584  **/
4585 int
4586 lpfc_sli_hbq_size(void)
4587 {
4588 	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
4589 }
4590 
4591 /**
4592  * lpfc_sli_hbq_setup - configure and initialize HBQs
4593  * @phba: Pointer to HBA context object.
4594  *
4595  * This function is called during the SLI initialization to configure
4596  * all the HBQs and post buffers to the HBQ. The caller is not
4597  * required to hold any locks. This function will return zero if successful
4598  * else it will return negative error code.
4599  **/
4600 static int
4601 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
4602 {
4603 	int  hbq_count = lpfc_sli_hbq_count();
4604 	LPFC_MBOXQ_t *pmb;
4605 	MAILBOX_t *pmbox;
4606 	uint32_t hbqno;
4607 	uint32_t hbq_entry_index;
4608 
4609 				/* Get a Mailbox buffer to setup mailbox
4610 				 * commands for HBA initialization
4611 				 */
4612 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4613 
4614 	if (!pmb)
4615 		return -ENOMEM;
4616 
4617 	pmbox = &pmb->u.mb;
4618 
4619 	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
4620 	phba->link_state = LPFC_INIT_MBX_CMDS;
4621 	phba->hbq_in_use = 1;
4622 
4623 	hbq_entry_index = 0;
4624 	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
4625 		phba->hbqs[hbqno].next_hbqPutIdx = 0;
4626 		phba->hbqs[hbqno].hbqPutIdx      = 0;
4627 		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
4628 		phba->hbqs[hbqno].entry_count =
4629 			lpfc_hbq_defs[hbqno]->entry_count;
4630 		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
4631 			hbq_entry_index, pmb);
4632 		hbq_entry_index += phba->hbqs[hbqno].entry_count;
4633 
4634 		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
4635 			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4636 			   mbxStatus <status>, ring <num> */
4637 
4638 			lpfc_printf_log(phba, KERN_ERR,
4639 					LOG_SLI | LOG_VPORT,
4640 					"1805 Adapter failed to init. "
4641 					"Data: x%x x%x x%x\n",
4642 					pmbox->mbxCommand,
4643 					pmbox->mbxStatus, hbqno);
4644 
4645 			phba->link_state = LPFC_HBA_ERROR;
4646 			mempool_free(pmb, phba->mbox_mem_pool);
4647 			return -ENXIO;
4648 		}
4649 	}
4650 	phba->hbq_count = hbq_count;
4651 
4652 	mempool_free(pmb, phba->mbox_mem_pool);
4653 
4654 	/* Initially populate or replenish the HBQs */
4655 	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
4656 		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
4657 	return 0;
4658 }
4659 
4660 /**
4661  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
4662  * @phba: Pointer to HBA context object.
4663  *
4664  * This function is called during the SLI initialization to configure
4665  * all the HBQs and post buffers to the HBQ. The caller is not
4666  * required to hold any locks. This function will return zero if successful
4667  * else it will return negative error code.
4668  **/
4669 static int
4670 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
4671 {
4672 	phba->hbq_in_use = 1;
4673 	phba->hbqs[LPFC_ELS_HBQ].entry_count =
4674 		lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
4675 	phba->hbq_count = 1;
4676 	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
4677 	/* Initially populate or replenish the HBQs */
4678 	return 0;
4679 }
4680 
4681 /**
4682  * lpfc_sli_config_port - Issue config port mailbox command
4683  * @phba: Pointer to HBA context object.
4684  * @sli_mode: sli mode - 2/3
4685  *
4686  * This function is called by the sli initialization code path
4687  * to issue config_port mailbox command. This function restarts the
4688  * HBA firmware and issues a config_port mailbox command to configure
4689  * the SLI interface in the sli mode specified by sli_mode
4690  * variable. The caller is not required to hold any locks.
4691  * The function returns 0 if successful, else returns negative error
4692  * code.
4693  **/
4694 int
4695 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
4696 {
4697 	LPFC_MBOXQ_t *pmb;
4698 	uint32_t resetcount = 0, rc = 0, done = 0;
4699 
4700 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4701 	if (!pmb) {
4702 		phba->link_state = LPFC_HBA_ERROR;
4703 		return -ENOMEM;
4704 	}
4705 
4706 	phba->sli_rev = sli_mode;
4707 	while (resetcount < 2 && !done) {
4708 		spin_lock_irq(&phba->hbalock);
4709 		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
4710 		spin_unlock_irq(&phba->hbalock);
4711 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4712 		lpfc_sli_brdrestart(phba);
4713 		rc = lpfc_sli_chipset_init(phba);
4714 		if (rc)
4715 			break;
4716 
4717 		spin_lock_irq(&phba->hbalock);
4718 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
4719 		spin_unlock_irq(&phba->hbalock);
4720 		resetcount++;
4721 
4722 		/* Call pre CONFIG_PORT mailbox command initialization.  A
4723 		 * value of 0 means the call was successful.  Any other
4724 		 * nonzero value is a failure, but if ERESTART is returned,
4725 		 * the driver may reset the HBA and try again.
4726 		 */
4727 		rc = lpfc_config_port_prep(phba);
4728 		if (rc == -ERESTART) {
4729 			phba->link_state = LPFC_LINK_UNKNOWN;
4730 			continue;
4731 		} else if (rc)
4732 			break;
4733 
4734 		phba->link_state = LPFC_INIT_MBX_CMDS;
4735 		lpfc_config_port(phba, pmb);
4736 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
4737 		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
4738 					LPFC_SLI3_HBQ_ENABLED |
4739 					LPFC_SLI3_CRP_ENABLED |
4740 					LPFC_SLI3_BG_ENABLED |
4741 					LPFC_SLI3_DSS_ENABLED);
4742 		if (rc != MBX_SUCCESS) {
4743 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4744 				"0442 Adapter failed to init, mbxCmd x%x "
4745 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
4746 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
4747 			spin_lock_irq(&phba->hbalock);
4748 			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
4749 			spin_unlock_irq(&phba->hbalock);
4750 			rc = -ENXIO;
4751 		} else {
4752 			/* Allow asynchronous mailbox command to go through */
4753 			spin_lock_irq(&phba->hbalock);
4754 			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
4755 			spin_unlock_irq(&phba->hbalock);
4756 			done = 1;
4757 
4758 			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
4759 			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
4760 				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
4761 					"3110 Port did not grant ASABT\n");
4762 		}
4763 	}
4764 	if (!done) {
4765 		rc = -EINVAL;
4766 		goto do_prep_failed;
4767 	}
4768 	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
4769 		if (!pmb->u.mb.un.varCfgPort.cMA) {
4770 			rc = -ENXIO;
4771 			goto do_prep_failed;
4772 		}
4773 		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
4774 			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
4775 			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
4776 			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
4777 				phba->max_vpi : phba->max_vports;
4778 
4779 		} else
4780 			phba->max_vpi = 0;
4781 		phba->fips_level = 0;
4782 		phba->fips_spec_rev = 0;
4783 		if (pmb->u.mb.un.varCfgPort.gdss) {
4784 			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
4785 			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
4786 			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
4787 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4788 					"2850 Security Crypto Active. FIPS x%d "
4789 					"(Spec Rev: x%d)",
4790 					phba->fips_level, phba->fips_spec_rev);
4791 		}
4792 		if (pmb->u.mb.un.varCfgPort.sec_err) {
4793 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4794 					"2856 Config Port Security Crypto "
4795 					"Error: x%x ",
4796 					pmb->u.mb.un.varCfgPort.sec_err);
4797 		}
4798 		if (pmb->u.mb.un.varCfgPort.gerbm)
4799 			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
4800 		if (pmb->u.mb.un.varCfgPort.gcrp)
4801 			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
4802 
4803 		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
4804 		phba->port_gp = phba->mbox->us.s3_pgp.port;
4805 
4806 		if (phba->cfg_enable_bg) {
4807 			if (pmb->u.mb.un.varCfgPort.gbg)
4808 				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
4809 			else
4810 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4811 						"0443 Adapter did not grant "
4812 						"BlockGuard\n");
4813 		}
4814 	} else {
4815 		phba->hbq_get = NULL;
4816 		phba->port_gp = phba->mbox->us.s2.port;
4817 		phba->max_vpi = 0;
4818 	}
4819 do_prep_failed:
4820 	mempool_free(pmb, phba->mbox_mem_pool);
4821 	return rc;
4822 }
4823 
4824 
4825 /**
4826  * lpfc_sli_hba_setup - SLI initialization function
4827  * @phba: Pointer to HBA context object.
4828  *
4829  * This function is the main SLI initialization function. This function
4830  * is called by the HBA initialization code, HBA reset code and HBA
4831  * error attention handler code. Caller is not required to hold any
4832  * locks. This function issues config_port mailbox command to configure
4833  * the SLI, setup iocb rings and HBQ rings. In the end the function
4834  * calls the config_port_post function to issue init_link mailbox
4835  * command and to start the discovery. The function will return zero
4836  * if successful, else it will return negative error code.
4837  **/
4838 int
4839 lpfc_sli_hba_setup(struct lpfc_hba *phba)
4840 {
4841 	uint32_t rc;
4842 	int  mode = 3, i;
4843 	int longs;
4844 
4845 	switch (phba->cfg_sli_mode) {
4846 	case 2:
4847 		if (phba->cfg_enable_npiv) {
4848 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4849 				"1824 NPIV enabled: Override sli_mode "
4850 				"parameter (%d) to auto (0).\n",
4851 				phba->cfg_sli_mode);
4852 			break;
4853 		}
4854 		mode = 2;
4855 		break;
4856 	case 0:
4857 	case 3:
4858 		break;
4859 	default:
4860 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4861 				"1819 Unrecognized sli_mode parameter: %d.\n",
4862 				phba->cfg_sli_mode);
4863 
4864 		break;
4865 	}
4866 	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
4867 
4868 	rc = lpfc_sli_config_port(phba, mode);
4869 
4870 	if (rc && phba->cfg_sli_mode == 3)
4871 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
4872 				"1820 Unable to select SLI-3.  "
4873 				"Not supported by adapter.\n");
4874 	if (rc && mode != 2)
4875 		rc = lpfc_sli_config_port(phba, 2);
4876 	else if (rc && mode == 2)
4877 		rc = lpfc_sli_config_port(phba, 3);
4878 	if (rc)
4879 		goto lpfc_sli_hba_setup_error;
4880 
4881 	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
4882 	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
4883 		rc = pci_enable_pcie_error_reporting(phba->pcidev);
4884 		if (!rc) {
4885 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4886 					"2709 This device supports "
4887 					"Advanced Error Reporting (AER)\n");
4888 			spin_lock_irq(&phba->hbalock);
4889 			phba->hba_flag |= HBA_AER_ENABLED;
4890 			spin_unlock_irq(&phba->hbalock);
4891 		} else {
4892 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4893 					"2708 This device does not support "
4894 					"Advanced Error Reporting (AER): %d\n",
4895 					rc);
4896 			phba->cfg_aer_support = 0;
4897 		}
4898 	}
4899 
4900 	if (phba->sli_rev == 3) {
4901 		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
4902 		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
4903 	} else {
4904 		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
4905 		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
4906 		phba->sli3_options = 0;
4907 	}
4908 
4909 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4910 			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
4911 			phba->sli_rev, phba->max_vpi);
4912 	rc = lpfc_sli_ring_map(phba);
4913 
4914 	if (rc)
4915 		goto lpfc_sli_hba_setup_error;
4916 
4917 	/* Initialize VPIs. */
4918 	if (phba->sli_rev == LPFC_SLI_REV3) {
4919 		/*
4920 		 * The VPI bitmask and physical ID array are allocated
4921 		 * and initialized once only - at driver load.  A port
4922 		 * reset doesn't need to reinitialize this memory.
4923 		 */
4924 		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
4925 			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
4926 			phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
4927 						  GFP_KERNEL);
4928 			if (!phba->vpi_bmask) {
4929 				rc = -ENOMEM;
4930 				goto lpfc_sli_hba_setup_error;
4931 			}
4932 
4933 			phba->vpi_ids = kzalloc(
4934 					(phba->max_vpi+1) * sizeof(uint16_t),
4935 					GFP_KERNEL);
4936 			if (!phba->vpi_ids) {
4937 				kfree(phba->vpi_bmask);
4938 				rc = -ENOMEM;
4939 				goto lpfc_sli_hba_setup_error;
4940 			}
4941 			for (i = 0; i < phba->max_vpi; i++)
4942 				phba->vpi_ids[i] = i;
4943 		}
4944 	}
4945 
4946 	/* Init HBQs */
4947 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
4948 		rc = lpfc_sli_hbq_setup(phba);
4949 		if (rc)
4950 			goto lpfc_sli_hba_setup_error;
4951 	}
4952 	spin_lock_irq(&phba->hbalock);
4953 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
4954 	spin_unlock_irq(&phba->hbalock);
4955 
4956 	rc = lpfc_config_port_post(phba);
4957 	if (rc)
4958 		goto lpfc_sli_hba_setup_error;
4959 
4960 	return rc;
4961 
4962 lpfc_sli_hba_setup_error:
4963 	phba->link_state = LPFC_HBA_ERROR;
4964 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
4965 			"0445 Firmware initialization failed\n");
4966 	return rc;
4967 }
4968 
4969 /**
4970  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
4971  * @phba: Pointer to HBA context object.
4972  * @mboxq: mailbox pointer.
4973  * This function issue a dump mailbox command to read config region
4974  * 23 and parse the records in the region and populate driver
4975  * data structure.
4976  **/
4977 static int
4978 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
4979 {
4980 	LPFC_MBOXQ_t *mboxq;
4981 	struct lpfc_dmabuf *mp;
4982 	struct lpfc_mqe *mqe;
4983 	uint32_t data_length;
4984 	int rc;
4985 
4986 	/* Program the default value of vlan_id and fc_map */
4987 	phba->valid_vlan = 0;
4988 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
4989 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
4990 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
4991 
4992 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4993 	if (!mboxq)
4994 		return -ENOMEM;
4995 
4996 	mqe = &mboxq->u.mqe;
4997 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
4998 		rc = -ENOMEM;
4999 		goto out_free_mboxq;
5000 	}
5001 
5002 	mp = (struct lpfc_dmabuf *) mboxq->context1;
5003 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5004 
5005 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5006 			"(%d):2571 Mailbox cmd x%x Status x%x "
5007 			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5008 			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5009 			"CQ: x%x x%x x%x x%x\n",
5010 			mboxq->vport ? mboxq->vport->vpi : 0,
5011 			bf_get(lpfc_mqe_command, mqe),
5012 			bf_get(lpfc_mqe_status, mqe),
5013 			mqe->un.mb_words[0], mqe->un.mb_words[1],
5014 			mqe->un.mb_words[2], mqe->un.mb_words[3],
5015 			mqe->un.mb_words[4], mqe->un.mb_words[5],
5016 			mqe->un.mb_words[6], mqe->un.mb_words[7],
5017 			mqe->un.mb_words[8], mqe->un.mb_words[9],
5018 			mqe->un.mb_words[10], mqe->un.mb_words[11],
5019 			mqe->un.mb_words[12], mqe->un.mb_words[13],
5020 			mqe->un.mb_words[14], mqe->un.mb_words[15],
5021 			mqe->un.mb_words[16], mqe->un.mb_words[50],
5022 			mboxq->mcqe.word0,
5023 			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
5024 			mboxq->mcqe.trailer);
5025 
5026 	if (rc) {
5027 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
5028 		kfree(mp);
5029 		rc = -EIO;
5030 		goto out_free_mboxq;
5031 	}
5032 	data_length = mqe->un.mb_words[5];
5033 	if (data_length > DMP_RGN23_SIZE) {
5034 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
5035 		kfree(mp);
5036 		rc = -EIO;
5037 		goto out_free_mboxq;
5038 	}
5039 
5040 	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5041 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
5042 	kfree(mp);
5043 	rc = 0;
5044 
5045 out_free_mboxq:
5046 	mempool_free(mboxq, phba->mbox_mem_pool);
5047 	return rc;
5048 }
5049 
5050 /**
5051  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5052  * @phba: pointer to lpfc hba data structure.
5053  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5054  * @vpd: pointer to the memory to hold resulting port vpd data.
5055  * @vpd_size: On input, the number of bytes allocated to @vpd.
5056  *	      On output, the number of data bytes in @vpd.
5057  *
5058  * This routine executes a READ_REV SLI4 mailbox command.  In
5059  * addition, this routine gets the port vpd data.
5060  *
5061  * Return codes
5062  * 	0 - successful
5063  * 	-ENOMEM - could not allocated memory.
5064  **/
5065 static int
5066 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5067 		    uint8_t *vpd, uint32_t *vpd_size)
5068 {
5069 	int rc = 0;
5070 	uint32_t dma_size;
5071 	struct lpfc_dmabuf *dmabuf;
5072 	struct lpfc_mqe *mqe;
5073 
5074 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5075 	if (!dmabuf)
5076 		return -ENOMEM;
5077 
5078 	/*
5079 	 * Get a DMA buffer for the vpd data resulting from the READ_REV
5080 	 * mailbox command.
5081 	 */
5082 	dma_size = *vpd_size;
5083 	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
5084 					   &dmabuf->phys, GFP_KERNEL);
5085 	if (!dmabuf->virt) {
5086 		kfree(dmabuf);
5087 		return -ENOMEM;
5088 	}
5089 
5090 	/*
5091 	 * The SLI4 implementation of READ_REV conflicts at word1,
5092 	 * bits 31:16 and SLI4 adds vpd functionality not present
5093 	 * in SLI3.  This code corrects the conflicts.
5094 	 */
5095 	lpfc_read_rev(phba, mboxq);
5096 	mqe = &mboxq->u.mqe;
5097 	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5098 	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5099 	mqe->un.read_rev.word1 &= 0x0000FFFF;
5100 	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5101 	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5102 
5103 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5104 	if (rc) {
5105 		dma_free_coherent(&phba->pcidev->dev, dma_size,
5106 				  dmabuf->virt, dmabuf->phys);
5107 		kfree(dmabuf);
5108 		return -EIO;
5109 	}
5110 
5111 	/*
5112 	 * The available vpd length cannot be bigger than the
5113 	 * DMA buffer passed to the port.  Catch the less than
5114 	 * case and update the caller's size.
5115 	 */
5116 	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5117 		*vpd_size = mqe->un.read_rev.avail_vpd_len;
5118 
5119 	memcpy(vpd, dmabuf->virt, *vpd_size);
5120 
5121 	dma_free_coherent(&phba->pcidev->dev, dma_size,
5122 			  dmabuf->virt, dmabuf->phys);
5123 	kfree(dmabuf);
5124 	return 0;
5125 }
5126 
5127 /**
5128  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5129  * @phba: pointer to lpfc hba data structure.
5130  *
5131  * This routine retrieves SLI4 device physical port name this PCI function
5132  * is attached to.
5133  *
5134  * Return codes
5135  *      0 - successful
5136  *      otherwise - failed to retrieve physical port name
5137  **/
5138 static int
5139 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
5140 {
5141 	LPFC_MBOXQ_t *mboxq;
5142 	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5143 	struct lpfc_controller_attribute *cntl_attr;
5144 	struct lpfc_mbx_get_port_name *get_port_name;
5145 	void *virtaddr = NULL;
5146 	uint32_t alloclen, reqlen;
5147 	uint32_t shdr_status, shdr_add_status;
5148 	union lpfc_sli4_cfg_shdr *shdr;
5149 	char cport_name = 0;
5150 	int rc;
5151 
5152 	/* We assume nothing at this point */
5153 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5154 	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
5155 
5156 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5157 	if (!mboxq)
5158 		return -ENOMEM;
5159 	/* obtain link type and link number via READ_CONFIG */
5160 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
5161 	lpfc_sli4_read_config(phba);
5162 	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
5163 		goto retrieve_ppname;
5164 
5165 	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5166 	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5167 	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5168 			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5169 			LPFC_SLI4_MBX_NEMBED);
5170 	if (alloclen < reqlen) {
5171 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5172 				"3084 Allocated DMA memory size (%d) is "
5173 				"less than the requested DMA memory size "
5174 				"(%d)\n", alloclen, reqlen);
5175 		rc = -ENOMEM;
5176 		goto out_free_mboxq;
5177 	}
5178 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5179 	virtaddr = mboxq->sge_array->addr[0];
5180 	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5181 	shdr = &mbx_cntl_attr->cfg_shdr;
5182 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5183 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5184 	if (shdr_status || shdr_add_status || rc) {
5185 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5186 				"3085 Mailbox x%x (x%x/x%x) failed, "
5187 				"rc:x%x, status:x%x, add_status:x%x\n",
5188 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5189 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5190 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5191 				rc, shdr_status, shdr_add_status);
5192 		rc = -ENXIO;
5193 		goto out_free_mboxq;
5194 	}
5195 	cntl_attr = &mbx_cntl_attr->cntl_attr;
5196 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5197 	phba->sli4_hba.lnk_info.lnk_tp =
5198 		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5199 	phba->sli4_hba.lnk_info.lnk_no =
5200 		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5201 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5202 			"3086 lnk_type:%d, lnk_numb:%d\n",
5203 			phba->sli4_hba.lnk_info.lnk_tp,
5204 			phba->sli4_hba.lnk_info.lnk_no);
5205 
5206 retrieve_ppname:
5207 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5208 		LPFC_MBOX_OPCODE_GET_PORT_NAME,
5209 		sizeof(struct lpfc_mbx_get_port_name) -
5210 		sizeof(struct lpfc_sli4_cfg_mhdr),
5211 		LPFC_SLI4_MBX_EMBED);
5212 	get_port_name = &mboxq->u.mqe.un.get_port_name;
5213 	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
5214 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
5215 	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
5216 		phba->sli4_hba.lnk_info.lnk_tp);
5217 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5218 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5219 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5220 	if (shdr_status || shdr_add_status || rc) {
5221 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5222 				"3087 Mailbox x%x (x%x/x%x) failed: "
5223 				"rc:x%x, status:x%x, add_status:x%x\n",
5224 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5225 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5226 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5227 				rc, shdr_status, shdr_add_status);
5228 		rc = -ENXIO;
5229 		goto out_free_mboxq;
5230 	}
5231 	switch (phba->sli4_hba.lnk_info.lnk_no) {
5232 	case LPFC_LINK_NUMBER_0:
5233 		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
5234 				&get_port_name->u.response);
5235 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5236 		break;
5237 	case LPFC_LINK_NUMBER_1:
5238 		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
5239 				&get_port_name->u.response);
5240 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5241 		break;
5242 	case LPFC_LINK_NUMBER_2:
5243 		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
5244 				&get_port_name->u.response);
5245 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5246 		break;
5247 	case LPFC_LINK_NUMBER_3:
5248 		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
5249 				&get_port_name->u.response);
5250 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
5251 		break;
5252 	default:
5253 		break;
5254 	}
5255 
5256 	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
5257 		phba->Port[0] = cport_name;
5258 		phba->Port[1] = '\0';
5259 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5260 				"3091 SLI get port name: %s\n", phba->Port);
5261 	}
5262 
5263 out_free_mboxq:
5264 	if (rc != MBX_TIMEOUT) {
5265 		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
5266 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
5267 		else
5268 			mempool_free(mboxq, phba->mbox_mem_pool);
5269 	}
5270 	return rc;
5271 }
5272 
5273 /**
5274  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
5275  * @phba: pointer to lpfc hba data structure.
5276  *
5277  * This routine is called to explicitly arm the SLI4 device's completion and
5278  * event queues
5279  **/
5280 static void
5281 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
5282 {
5283 	int qidx;
5284 
5285 	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
5286 	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
5287 	if (phba->sli4_hba.nvmels_cq)
5288 		lpfc_sli4_cq_release(phba->sli4_hba.nvmels_cq,
5289 						LPFC_QUEUE_REARM);
5290 
5291 	if (phba->sli4_hba.fcp_cq)
5292 		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
5293 			lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[qidx],
5294 						LPFC_QUEUE_REARM);
5295 
5296 	if (phba->sli4_hba.nvme_cq)
5297 		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
5298 			lpfc_sli4_cq_release(phba->sli4_hba.nvme_cq[qidx],
5299 						LPFC_QUEUE_REARM);
5300 
5301 	if (phba->cfg_fof)
5302 		lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);
5303 
5304 	if (phba->sli4_hba.hba_eq)
5305 		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
5306 			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[qidx],
5307 						LPFC_QUEUE_REARM);
5308 
5309 	if (phba->nvmet_support) {
5310 		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
5311 			lpfc_sli4_cq_release(
5312 				phba->sli4_hba.nvmet_cqset[qidx],
5313 				LPFC_QUEUE_REARM);
5314 		}
5315 	}
5316 
5317 	if (phba->cfg_fof)
5318 		lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
5319 }
5320 
5321 /**
5322  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
5323  * @phba: Pointer to HBA context object.
5324  * @type: The resource extent type.
5325  * @extnt_count: buffer to hold port available extent count.
5326  * @extnt_size: buffer to hold element count per extent.
5327  *
5328  * This function calls the port and retrievs the number of available
5329  * extents and their size for a particular extent type.
5330  *
5331  * Returns: 0 if successful.  Nonzero otherwise.
5332  **/
5333 int
5334 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
5335 			       uint16_t *extnt_count, uint16_t *extnt_size)
5336 {
5337 	int rc = 0;
5338 	uint32_t length;
5339 	uint32_t mbox_tmo;
5340 	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
5341 	LPFC_MBOXQ_t *mbox;
5342 
5343 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5344 	if (!mbox)
5345 		return -ENOMEM;
5346 
5347 	/* Find out how many extents are available for this resource type */
5348 	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
5349 		  sizeof(struct lpfc_sli4_cfg_mhdr));
5350 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5351 			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
5352 			 length, LPFC_SLI4_MBX_EMBED);
5353 
5354 	/* Send an extents count of 0 - the GET doesn't use it. */
5355 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5356 					LPFC_SLI4_MBX_EMBED);
5357 	if (unlikely(rc)) {
5358 		rc = -EIO;
5359 		goto err_exit;
5360 	}
5361 
5362 	if (!phba->sli4_hba.intr_enable)
5363 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5364 	else {
5365 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5366 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5367 	}
5368 	if (unlikely(rc)) {
5369 		rc = -EIO;
5370 		goto err_exit;
5371 	}
5372 
5373 	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
5374 	if (bf_get(lpfc_mbox_hdr_status,
5375 		   &rsrc_info->header.cfg_shdr.response)) {
5376 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5377 				"2930 Failed to get resource extents "
5378 				"Status 0x%x Add'l Status 0x%x\n",
5379 				bf_get(lpfc_mbox_hdr_status,
5380 				       &rsrc_info->header.cfg_shdr.response),
5381 				bf_get(lpfc_mbox_hdr_add_status,
5382 				       &rsrc_info->header.cfg_shdr.response));
5383 		rc = -EIO;
5384 		goto err_exit;
5385 	}
5386 
5387 	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
5388 			      &rsrc_info->u.rsp);
5389 	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
5390 			     &rsrc_info->u.rsp);
5391 
5392 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5393 			"3162 Retrieved extents type-%d from port: count:%d, "
5394 			"size:%d\n", type, *extnt_count, *extnt_size);
5395 
5396 err_exit:
5397 	mempool_free(mbox, phba->mbox_mem_pool);
5398 	return rc;
5399 }
5400 
5401 /**
5402  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
5403  * @phba: Pointer to HBA context object.
5404  * @type: The extent type to check.
5405  *
5406  * This function reads the current available extents from the port and checks
5407  * if the extent count or extent size has changed since the last access.
5408  * Callers use this routine post port reset to understand if there is a
5409  * extent reprovisioning requirement.
5410  *
5411  * Returns:
5412  *   -Error: error indicates problem.
5413  *   1: Extent count or size has changed.
5414  *   0: No changes.
5415  **/
5416 static int
5417 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
5418 {
5419 	uint16_t curr_ext_cnt, rsrc_ext_cnt;
5420 	uint16_t size_diff, rsrc_ext_size;
5421 	int rc = 0;
5422 	struct lpfc_rsrc_blks *rsrc_entry;
5423 	struct list_head *rsrc_blk_list = NULL;
5424 
5425 	size_diff = 0;
5426 	curr_ext_cnt = 0;
5427 	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5428 					    &rsrc_ext_cnt,
5429 					    &rsrc_ext_size);
5430 	if (unlikely(rc))
5431 		return -EIO;
5432 
5433 	switch (type) {
5434 	case LPFC_RSC_TYPE_FCOE_RPI:
5435 		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5436 		break;
5437 	case LPFC_RSC_TYPE_FCOE_VPI:
5438 		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
5439 		break;
5440 	case LPFC_RSC_TYPE_FCOE_XRI:
5441 		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5442 		break;
5443 	case LPFC_RSC_TYPE_FCOE_VFI:
5444 		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5445 		break;
5446 	default:
5447 		break;
5448 	}
5449 
5450 	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
5451 		curr_ext_cnt++;
5452 		if (rsrc_entry->rsrc_size != rsrc_ext_size)
5453 			size_diff++;
5454 	}
5455 
5456 	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
5457 		rc = 1;
5458 
5459 	return rc;
5460 }
5461 
5462 /**
5463  * lpfc_sli4_cfg_post_extnts -
5464  * @phba: Pointer to HBA context object.
5465  * @extnt_cnt - number of available extents.
5466  * @type - the extent type (rpi, xri, vfi, vpi).
5467  * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation.
5468  * @mbox - pointer to the caller's allocated mailbox structure.
5469  *
5470  * This function executes the extents allocation request.  It also
5471  * takes care of the amount of memory needed to allocate or get the
5472  * allocated extents. It is the caller's responsibility to evaluate
5473  * the response.
5474  *
5475  * Returns:
5476  *   -Error:  Error value describes the condition found.
5477  *   0: if successful
5478  **/
5479 static int
5480 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
5481 			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
5482 {
5483 	int rc = 0;
5484 	uint32_t req_len;
5485 	uint32_t emb_len;
5486 	uint32_t alloc_len, mbox_tmo;
5487 
5488 	/* Calculate the total requested length of the dma memory */
5489 	req_len = extnt_cnt * sizeof(uint16_t);
5490 
5491 	/*
5492 	 * Calculate the size of an embedded mailbox.  The uint32_t
5493 	 * accounts for extents-specific word.
5494 	 */
5495 	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
5496 		sizeof(uint32_t);
5497 
5498 	/*
5499 	 * Presume the allocation and response will fit into an embedded
5500 	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
5501 	 */
5502 	*emb = LPFC_SLI4_MBX_EMBED;
5503 	if (req_len > emb_len) {
5504 		req_len = extnt_cnt * sizeof(uint16_t) +
5505 			sizeof(union lpfc_sli4_cfg_shdr) +
5506 			sizeof(uint32_t);
5507 		*emb = LPFC_SLI4_MBX_NEMBED;
5508 	}
5509 
5510 	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5511 				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
5512 				     req_len, *emb);
5513 	if (alloc_len < req_len) {
5514 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5515 			"2982 Allocated DMA memory size (x%x) is "
5516 			"less than the requested DMA memory "
5517 			"size (x%x)\n", alloc_len, req_len);
5518 		return -ENOMEM;
5519 	}
5520 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
5521 	if (unlikely(rc))
5522 		return -EIO;
5523 
5524 	if (!phba->sli4_hba.intr_enable)
5525 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5526 	else {
5527 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5528 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5529 	}
5530 
5531 	if (unlikely(rc))
5532 		rc = -EIO;
5533 	return rc;
5534 }
5535 
5536 /**
5537  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
5538  * @phba: Pointer to HBA context object.
5539  * @type:  The resource extent type to allocate.
5540  *
5541  * This function allocates the number of elements for the specified
5542  * resource type.
5543  **/
5544 static int
5545 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
5546 {
5547 	bool emb = false;
5548 	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
5549 	uint16_t rsrc_id, rsrc_start, j, k;
5550 	uint16_t *ids;
5551 	int i, rc;
5552 	unsigned long longs;
5553 	unsigned long *bmask;
5554 	struct lpfc_rsrc_blks *rsrc_blks;
5555 	LPFC_MBOXQ_t *mbox;
5556 	uint32_t length;
5557 	struct lpfc_id_range *id_array = NULL;
5558 	void *virtaddr = NULL;
5559 	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
5560 	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
5561 	struct list_head *ext_blk_list;
5562 
5563 	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
5564 					    &rsrc_cnt,
5565 					    &rsrc_size);
5566 	if (unlikely(rc))
5567 		return -EIO;
5568 
5569 	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
5570 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5571 			"3009 No available Resource Extents "
5572 			"for resource type 0x%x: Count: 0x%x, "
5573 			"Size 0x%x\n", type, rsrc_cnt,
5574 			rsrc_size);
5575 		return -ENOMEM;
5576 	}
5577 
5578 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
5579 			"2903 Post resource extents type-0x%x: "
5580 			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
5581 
5582 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5583 	if (!mbox)
5584 		return -ENOMEM;
5585 
5586 	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
5587 	if (unlikely(rc)) {
5588 		rc = -EIO;
5589 		goto err_exit;
5590 	}
5591 
5592 	/*
5593 	 * Figure out where the response is located.  Then get local pointers
5594 	 * to the response data.  The port does not guarantee to respond to
5595 	 * all extents counts request so update the local variable with the
5596 	 * allocated count from the port.
5597 	 */
5598 	if (emb == LPFC_SLI4_MBX_EMBED) {
5599 		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
5600 		id_array = &rsrc_ext->u.rsp.id[0];
5601 		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
5602 	} else {
5603 		virtaddr = mbox->sge_array->addr[0];
5604 		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
5605 		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
5606 		id_array = &n_rsrc->id;
5607 	}
5608 
5609 	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
5610 	rsrc_id_cnt = rsrc_cnt * rsrc_size;
5611 
5612 	/*
5613 	 * Based on the resource size and count, correct the base and max
5614 	 * resource values.
5615 	 */
5616 	length = sizeof(struct lpfc_rsrc_blks);
5617 	switch (type) {
5618 	case LPFC_RSC_TYPE_FCOE_RPI:
5619 		phba->sli4_hba.rpi_bmask = kzalloc(longs *
5620 						   sizeof(unsigned long),
5621 						   GFP_KERNEL);
5622 		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
5623 			rc = -ENOMEM;
5624 			goto err_exit;
5625 		}
5626 		phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
5627 						 sizeof(uint16_t),
5628 						 GFP_KERNEL);
5629 		if (unlikely(!phba->sli4_hba.rpi_ids)) {
5630 			kfree(phba->sli4_hba.rpi_bmask);
5631 			rc = -ENOMEM;
5632 			goto err_exit;
5633 		}
5634 
5635 		/*
5636 		 * The next_rpi was initialized with the maximum available
5637 		 * count but the port may allocate a smaller number.  Catch
5638 		 * that case and update the next_rpi.
5639 		 */
5640 		phba->sli4_hba.next_rpi = rsrc_id_cnt;
5641 
5642 		/* Initialize local ptrs for common extent processing later. */
5643 		bmask = phba->sli4_hba.rpi_bmask;
5644 		ids = phba->sli4_hba.rpi_ids;
5645 		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
5646 		break;
5647 	case LPFC_RSC_TYPE_FCOE_VPI:
5648 		phba->vpi_bmask = kzalloc(longs *
5649 					  sizeof(unsigned long),
5650 					  GFP_KERNEL);
5651 		if (unlikely(!phba->vpi_bmask)) {
5652 			rc = -ENOMEM;
5653 			goto err_exit;
5654 		}
5655 		phba->vpi_ids = kzalloc(rsrc_id_cnt *
5656 					 sizeof(uint16_t),
5657 					 GFP_KERNEL);
5658 		if (unlikely(!phba->vpi_ids)) {
5659 			kfree(phba->vpi_bmask);
5660 			rc = -ENOMEM;
5661 			goto err_exit;
5662 		}
5663 
5664 		/* Initialize local ptrs for common extent processing later. */
5665 		bmask = phba->vpi_bmask;
5666 		ids = phba->vpi_ids;
5667 		ext_blk_list = &phba->lpfc_vpi_blk_list;
5668 		break;
5669 	case LPFC_RSC_TYPE_FCOE_XRI:
5670 		phba->sli4_hba.xri_bmask = kzalloc(longs *
5671 						   sizeof(unsigned long),
5672 						   GFP_KERNEL);
5673 		if (unlikely(!phba->sli4_hba.xri_bmask)) {
5674 			rc = -ENOMEM;
5675 			goto err_exit;
5676 		}
5677 		phba->sli4_hba.max_cfg_param.xri_used = 0;
5678 		phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
5679 						 sizeof(uint16_t),
5680 						 GFP_KERNEL);
5681 		if (unlikely(!phba->sli4_hba.xri_ids)) {
5682 			kfree(phba->sli4_hba.xri_bmask);
5683 			rc = -ENOMEM;
5684 			goto err_exit;
5685 		}
5686 
5687 		/* Initialize local ptrs for common extent processing later. */
5688 		bmask = phba->sli4_hba.xri_bmask;
5689 		ids = phba->sli4_hba.xri_ids;
5690 		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
5691 		break;
5692 	case LPFC_RSC_TYPE_FCOE_VFI:
5693 		phba->sli4_hba.vfi_bmask = kzalloc(longs *
5694 						   sizeof(unsigned long),
5695 						   GFP_KERNEL);
5696 		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
5697 			rc = -ENOMEM;
5698 			goto err_exit;
5699 		}
5700 		phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
5701 						 sizeof(uint16_t),
5702 						 GFP_KERNEL);
5703 		if (unlikely(!phba->sli4_hba.vfi_ids)) {
5704 			kfree(phba->sli4_hba.vfi_bmask);
5705 			rc = -ENOMEM;
5706 			goto err_exit;
5707 		}
5708 
5709 		/* Initialize local ptrs for common extent processing later. */
5710 		bmask = phba->sli4_hba.vfi_bmask;
5711 		ids = phba->sli4_hba.vfi_ids;
5712 		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
5713 		break;
5714 	default:
5715 		/* Unsupported Opcode.  Fail call. */
5716 		id_array = NULL;
5717 		bmask = NULL;
5718 		ids = NULL;
5719 		ext_blk_list = NULL;
5720 		goto err_exit;
5721 	}
5722 
5723 	/*
5724 	 * Complete initializing the extent configuration with the
5725 	 * allocated ids assigned to this function.  The bitmask serves
5726 	 * as an index into the array and manages the available ids.  The
5727 	 * array just stores the ids communicated to the port via the wqes.
5728 	 */
5729 	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
5730 		if ((i % 2) == 0)
5731 			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
5732 					 &id_array[k]);
5733 		else
5734 			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
5735 					 &id_array[k]);
5736 
5737 		rsrc_blks = kzalloc(length, GFP_KERNEL);
5738 		if (unlikely(!rsrc_blks)) {
5739 			rc = -ENOMEM;
5740 			kfree(bmask);
5741 			kfree(ids);
5742 			goto err_exit;
5743 		}
5744 		rsrc_blks->rsrc_start = rsrc_id;
5745 		rsrc_blks->rsrc_size = rsrc_size;
5746 		list_add_tail(&rsrc_blks->list, ext_blk_list);
5747 		rsrc_start = rsrc_id;
5748 		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
5749 			phba->sli4_hba.scsi_xri_start = rsrc_start +
5750 				lpfc_sli4_get_iocb_cnt(phba);
5751 			phba->sli4_hba.nvme_xri_start =
5752 				phba->sli4_hba.scsi_xri_start +
5753 				phba->sli4_hba.scsi_xri_max;
5754 		}
5755 
5756 		while (rsrc_id < (rsrc_start + rsrc_size)) {
5757 			ids[j] = rsrc_id;
5758 			rsrc_id++;
5759 			j++;
5760 		}
5761 		/* Entire word processed.  Get next word.*/
5762 		if ((i % 2) == 1)
5763 			k++;
5764 	}
5765  err_exit:
5766 	lpfc_sli4_mbox_cmd_free(phba, mbox);
5767 	return rc;
5768 }
5769 
5770 
5771 
5772 /**
5773  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
5774  * @phba: Pointer to HBA context object.
5775  * @type: the extent's type.
5776  *
5777  * This function deallocates all extents of a particular resource type.
5778  * SLI4 does not allow for deallocating a particular extent range.  It
5779  * is the caller's responsibility to release all kernel memory resources.
5780  **/
5781 static int
5782 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
5783 {
5784 	int rc;
5785 	uint32_t length, mbox_tmo = 0;
5786 	LPFC_MBOXQ_t *mbox;
5787 	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
5788 	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
5789 
5790 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5791 	if (!mbox)
5792 		return -ENOMEM;
5793 
5794 	/*
5795 	 * This function sends an embedded mailbox because it only sends the
5796 	 * the resource type.  All extents of this type are released by the
5797 	 * port.
5798 	 */
5799 	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
5800 		  sizeof(struct lpfc_sli4_cfg_mhdr));
5801 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5802 			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
5803 			 length, LPFC_SLI4_MBX_EMBED);
5804 
5805 	/* Send an extents count of 0 - the dealloc doesn't use it. */
5806 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
5807 					LPFC_SLI4_MBX_EMBED);
5808 	if (unlikely(rc)) {
5809 		rc = -EIO;
5810 		goto out_free_mbox;
5811 	}
5812 	if (!phba->sli4_hba.intr_enable)
5813 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
5814 	else {
5815 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
5816 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
5817 	}
5818 	if (unlikely(rc)) {
5819 		rc = -EIO;
5820 		goto out_free_mbox;
5821 	}
5822 
5823 	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
5824 	if (bf_get(lpfc_mbox_hdr_status,
5825 		   &dealloc_rsrc->header.cfg_shdr.response)) {
5826 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
5827 				"2919 Failed to release resource extents "
5828 				"for type %d - Status 0x%x Add'l Status 0x%x. "
5829 				"Resource memory not released.\n",
5830 				type,
5831 				bf_get(lpfc_mbox_hdr_status,
5832 				    &dealloc_rsrc->header.cfg_shdr.response),
5833 				bf_get(lpfc_mbox_hdr_add_status,
5834 				    &dealloc_rsrc->header.cfg_shdr.response));
5835 		rc = -EIO;
5836 		goto out_free_mbox;
5837 	}
5838 
5839 	/* Release kernel memory resources for the specific type. */
5840 	switch (type) {
5841 	case LPFC_RSC_TYPE_FCOE_VPI:
5842 		kfree(phba->vpi_bmask);
5843 		kfree(phba->vpi_ids);
5844 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5845 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5846 				    &phba->lpfc_vpi_blk_list, list) {
5847 			list_del_init(&rsrc_blk->list);
5848 			kfree(rsrc_blk);
5849 		}
5850 		phba->sli4_hba.max_cfg_param.vpi_used = 0;
5851 		break;
5852 	case LPFC_RSC_TYPE_FCOE_XRI:
5853 		kfree(phba->sli4_hba.xri_bmask);
5854 		kfree(phba->sli4_hba.xri_ids);
5855 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5856 				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
5857 			list_del_init(&rsrc_blk->list);
5858 			kfree(rsrc_blk);
5859 		}
5860 		break;
5861 	case LPFC_RSC_TYPE_FCOE_VFI:
5862 		kfree(phba->sli4_hba.vfi_bmask);
5863 		kfree(phba->sli4_hba.vfi_ids);
5864 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5865 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5866 				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
5867 			list_del_init(&rsrc_blk->list);
5868 			kfree(rsrc_blk);
5869 		}
5870 		break;
5871 	case LPFC_RSC_TYPE_FCOE_RPI:
5872 		/* RPI bitmask and physical id array are cleaned up earlier. */
5873 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
5874 				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
5875 			list_del_init(&rsrc_blk->list);
5876 			kfree(rsrc_blk);
5877 		}
5878 		break;
5879 	default:
5880 		break;
5881 	}
5882 
5883 	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
5884 
5885  out_free_mbox:
5886 	mempool_free(mbox, phba->mbox_mem_pool);
5887 	return rc;
5888 }
5889 
5890 static void
5891 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
5892 		  uint32_t feature)
5893 {
5894 	uint32_t len;
5895 
5896 	len = sizeof(struct lpfc_mbx_set_feature) -
5897 		sizeof(struct lpfc_sli4_cfg_mhdr);
5898 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
5899 			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
5900 			 LPFC_SLI4_MBX_EMBED);
5901 
5902 	switch (feature) {
5903 	case LPFC_SET_UE_RECOVERY:
5904 		bf_set(lpfc_mbx_set_feature_UER,
5905 		       &mbox->u.mqe.un.set_feature, 1);
5906 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
5907 		mbox->u.mqe.un.set_feature.param_len = 8;
5908 		break;
5909 	case LPFC_SET_MDS_DIAGS:
5910 		bf_set(lpfc_mbx_set_feature_mds,
5911 		       &mbox->u.mqe.un.set_feature, 1);
5912 		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
5913 		       &mbox->u.mqe.un.set_feature, 1);
5914 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
5915 		mbox->u.mqe.un.set_feature.param_len = 8;
5916 		break;
5917 	}
5918 
5919 	return;
5920 }
5921 
5922 /**
5923  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
5924  * @phba: Pointer to HBA context object.
5925  *
5926  * This function allocates all SLI4 resource identifiers.
5927  **/
5928 int
5929 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
5930 {
5931 	int i, rc, error = 0;
5932 	uint16_t count, base;
5933 	unsigned long longs;
5934 
5935 	if (!phba->sli4_hba.rpi_hdrs_in_use)
5936 		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
5937 	if (phba->sli4_hba.extents_in_use) {
5938 		/*
5939 		 * The port supports resource extents. The XRI, VPI, VFI, RPI
5940 		 * resource extent count must be read and allocated before
5941 		 * provisioning the resource id arrays.
5942 		 */
5943 		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
5944 		    LPFC_IDX_RSRC_RDY) {
5945 			/*
5946 			 * Extent-based resources are set - the driver could
5947 			 * be in a port reset. Figure out if any corrective
5948 			 * actions need to be taken.
5949 			 */
5950 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5951 						 LPFC_RSC_TYPE_FCOE_VFI);
5952 			if (rc != 0)
5953 				error++;
5954 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5955 						 LPFC_RSC_TYPE_FCOE_VPI);
5956 			if (rc != 0)
5957 				error++;
5958 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5959 						 LPFC_RSC_TYPE_FCOE_XRI);
5960 			if (rc != 0)
5961 				error++;
5962 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
5963 						 LPFC_RSC_TYPE_FCOE_RPI);
5964 			if (rc != 0)
5965 				error++;
5966 
5967 			/*
5968 			 * It's possible that the number of resources
5969 			 * provided to this port instance changed between
5970 			 * resets.  Detect this condition and reallocate
5971 			 * resources.  Otherwise, there is no action.
5972 			 */
5973 			if (error) {
5974 				lpfc_printf_log(phba, KERN_INFO,
5975 						LOG_MBOX | LOG_INIT,
5976 						"2931 Detected extent resource "
5977 						"change.  Reallocating all "
5978 						"extents.\n");
5979 				rc = lpfc_sli4_dealloc_extent(phba,
5980 						 LPFC_RSC_TYPE_FCOE_VFI);
5981 				rc = lpfc_sli4_dealloc_extent(phba,
5982 						 LPFC_RSC_TYPE_FCOE_VPI);
5983 				rc = lpfc_sli4_dealloc_extent(phba,
5984 						 LPFC_RSC_TYPE_FCOE_XRI);
5985 				rc = lpfc_sli4_dealloc_extent(phba,
5986 						 LPFC_RSC_TYPE_FCOE_RPI);
5987 			} else
5988 				return 0;
5989 		}
5990 
5991 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
5992 		if (unlikely(rc))
5993 			goto err_exit;
5994 
5995 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
5996 		if (unlikely(rc))
5997 			goto err_exit;
5998 
5999 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6000 		if (unlikely(rc))
6001 			goto err_exit;
6002 
6003 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6004 		if (unlikely(rc))
6005 			goto err_exit;
6006 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6007 		       LPFC_IDX_RSRC_RDY);
6008 		return rc;
6009 	} else {
6010 		/*
6011 		 * The port does not support resource extents.  The XRI, VPI,
6012 		 * VFI, RPI resource ids were determined from READ_CONFIG.
6013 		 * Just allocate the bitmasks and provision the resource id
6014 		 * arrays.  If a port reset is active, the resources don't
6015 		 * need any action - just exit.
6016 		 */
6017 		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
6018 		    LPFC_IDX_RSRC_RDY) {
6019 			lpfc_sli4_dealloc_resource_identifiers(phba);
6020 			lpfc_sli4_remove_rpis(phba);
6021 		}
6022 		/* RPIs. */
6023 		count = phba->sli4_hba.max_cfg_param.max_rpi;
6024 		if (count <= 0) {
6025 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6026 					"3279 Invalid provisioning of "
6027 					"rpi:%d\n", count);
6028 			rc = -EINVAL;
6029 			goto err_exit;
6030 		}
6031 		base = phba->sli4_hba.max_cfg_param.rpi_base;
6032 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6033 		phba->sli4_hba.rpi_bmask = kzalloc(longs *
6034 						   sizeof(unsigned long),
6035 						   GFP_KERNEL);
6036 		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6037 			rc = -ENOMEM;
6038 			goto err_exit;
6039 		}
6040 		phba->sli4_hba.rpi_ids = kzalloc(count *
6041 						 sizeof(uint16_t),
6042 						 GFP_KERNEL);
6043 		if (unlikely(!phba->sli4_hba.rpi_ids)) {
6044 			rc = -ENOMEM;
6045 			goto free_rpi_bmask;
6046 		}
6047 
6048 		for (i = 0; i < count; i++)
6049 			phba->sli4_hba.rpi_ids[i] = base + i;
6050 
6051 		/* VPIs. */
6052 		count = phba->sli4_hba.max_cfg_param.max_vpi;
6053 		if (count <= 0) {
6054 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6055 					"3280 Invalid provisioning of "
6056 					"vpi:%d\n", count);
6057 			rc = -EINVAL;
6058 			goto free_rpi_ids;
6059 		}
6060 		base = phba->sli4_hba.max_cfg_param.vpi_base;
6061 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6062 		phba->vpi_bmask = kzalloc(longs *
6063 					  sizeof(unsigned long),
6064 					  GFP_KERNEL);
6065 		if (unlikely(!phba->vpi_bmask)) {
6066 			rc = -ENOMEM;
6067 			goto free_rpi_ids;
6068 		}
6069 		phba->vpi_ids = kzalloc(count *
6070 					sizeof(uint16_t),
6071 					GFP_KERNEL);
6072 		if (unlikely(!phba->vpi_ids)) {
6073 			rc = -ENOMEM;
6074 			goto free_vpi_bmask;
6075 		}
6076 
6077 		for (i = 0; i < count; i++)
6078 			phba->vpi_ids[i] = base + i;
6079 
6080 		/* XRIs. */
6081 		count = phba->sli4_hba.max_cfg_param.max_xri;
6082 		if (count <= 0) {
6083 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6084 					"3281 Invalid provisioning of "
6085 					"xri:%d\n", count);
6086 			rc = -EINVAL;
6087 			goto free_vpi_ids;
6088 		}
6089 		base = phba->sli4_hba.max_cfg_param.xri_base;
6090 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6091 		phba->sli4_hba.xri_bmask = kzalloc(longs *
6092 						   sizeof(unsigned long),
6093 						   GFP_KERNEL);
6094 		if (unlikely(!phba->sli4_hba.xri_bmask)) {
6095 			rc = -ENOMEM;
6096 			goto free_vpi_ids;
6097 		}
6098 		phba->sli4_hba.max_cfg_param.xri_used = 0;
6099 		phba->sli4_hba.xri_ids = kzalloc(count *
6100 						 sizeof(uint16_t),
6101 						 GFP_KERNEL);
6102 		if (unlikely(!phba->sli4_hba.xri_ids)) {
6103 			rc = -ENOMEM;
6104 			goto free_xri_bmask;
6105 		}
6106 
6107 		for (i = 0; i < count; i++)
6108 			phba->sli4_hba.xri_ids[i] = base + i;
6109 
6110 		/* VFIs. */
6111 		count = phba->sli4_hba.max_cfg_param.max_vfi;
6112 		if (count <= 0) {
6113 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6114 					"3282 Invalid provisioning of "
6115 					"vfi:%d\n", count);
6116 			rc = -EINVAL;
6117 			goto free_xri_ids;
6118 		}
6119 		base = phba->sli4_hba.max_cfg_param.vfi_base;
6120 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
6121 		phba->sli4_hba.vfi_bmask = kzalloc(longs *
6122 						   sizeof(unsigned long),
6123 						   GFP_KERNEL);
6124 		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6125 			rc = -ENOMEM;
6126 			goto free_xri_ids;
6127 		}
6128 		phba->sli4_hba.vfi_ids = kzalloc(count *
6129 						 sizeof(uint16_t),
6130 						 GFP_KERNEL);
6131 		if (unlikely(!phba->sli4_hba.vfi_ids)) {
6132 			rc = -ENOMEM;
6133 			goto free_vfi_bmask;
6134 		}
6135 
6136 		for (i = 0; i < count; i++)
6137 			phba->sli4_hba.vfi_ids[i] = base + i;
6138 
6139 		/*
6140 		 * Mark all resources ready.  An HBA reset doesn't need
6141 		 * to reset the initialization.
6142 		 */
6143 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
6144 		       LPFC_IDX_RSRC_RDY);
6145 		return 0;
6146 	}
6147 
6148  free_vfi_bmask:
6149 	kfree(phba->sli4_hba.vfi_bmask);
6150 	phba->sli4_hba.vfi_bmask = NULL;
6151  free_xri_ids:
6152 	kfree(phba->sli4_hba.xri_ids);
6153 	phba->sli4_hba.xri_ids = NULL;
6154  free_xri_bmask:
6155 	kfree(phba->sli4_hba.xri_bmask);
6156 	phba->sli4_hba.xri_bmask = NULL;
6157  free_vpi_ids:
6158 	kfree(phba->vpi_ids);
6159 	phba->vpi_ids = NULL;
6160  free_vpi_bmask:
6161 	kfree(phba->vpi_bmask);
6162 	phba->vpi_bmask = NULL;
6163  free_rpi_ids:
6164 	kfree(phba->sli4_hba.rpi_ids);
6165 	phba->sli4_hba.rpi_ids = NULL;
6166  free_rpi_bmask:
6167 	kfree(phba->sli4_hba.rpi_bmask);
6168 	phba->sli4_hba.rpi_bmask = NULL;
6169  err_exit:
6170 	return rc;
6171 }
6172 
6173 /**
6174  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
6175  * @phba: Pointer to HBA context object.
6176  *
6177  * This function allocates the number of elements for the specified
6178  * resource type.
6179  **/
6180 int
6181 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
6182 {
6183 	if (phba->sli4_hba.extents_in_use) {
6184 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
6185 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
6186 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
6187 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
6188 	} else {
6189 		kfree(phba->vpi_bmask);
6190 		phba->sli4_hba.max_cfg_param.vpi_used = 0;
6191 		kfree(phba->vpi_ids);
6192 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6193 		kfree(phba->sli4_hba.xri_bmask);
6194 		kfree(phba->sli4_hba.xri_ids);
6195 		kfree(phba->sli4_hba.vfi_bmask);
6196 		kfree(phba->sli4_hba.vfi_ids);
6197 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6198 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6199 	}
6200 
6201 	return 0;
6202 }
6203 
6204 /**
6205  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
6206  * @phba: Pointer to HBA context object.
6207  * @type: The resource extent type.
6208  * @extnt_count: buffer to hold port extent count response
6209  * @extnt_size: buffer to hold port extent size response.
6210  *
6211  * This function calls the port to read the host allocated extents
6212  * for a particular type.
6213  **/
6214 int
6215 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
6216 			       uint16_t *extnt_cnt, uint16_t *extnt_size)
6217 {
6218 	bool emb;
6219 	int rc = 0;
6220 	uint16_t curr_blks = 0;
6221 	uint32_t req_len, emb_len;
6222 	uint32_t alloc_len, mbox_tmo;
6223 	struct list_head *blk_list_head;
6224 	struct lpfc_rsrc_blks *rsrc_blk;
6225 	LPFC_MBOXQ_t *mbox;
6226 	void *virtaddr = NULL;
6227 	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6228 	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6229 	union  lpfc_sli4_cfg_shdr *shdr;
6230 
6231 	switch (type) {
6232 	case LPFC_RSC_TYPE_FCOE_VPI:
6233 		blk_list_head = &phba->lpfc_vpi_blk_list;
6234 		break;
6235 	case LPFC_RSC_TYPE_FCOE_XRI:
6236 		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
6237 		break;
6238 	case LPFC_RSC_TYPE_FCOE_VFI:
6239 		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
6240 		break;
6241 	case LPFC_RSC_TYPE_FCOE_RPI:
6242 		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
6243 		break;
6244 	default:
6245 		return -EIO;
6246 	}
6247 
6248 	/* Count the number of extents currently allocatd for this type. */
6249 	list_for_each_entry(rsrc_blk, blk_list_head, list) {
6250 		if (curr_blks == 0) {
6251 			/*
6252 			 * The GET_ALLOCATED mailbox does not return the size,
6253 			 * just the count.  The size should be just the size
6254 			 * stored in the current allocated block and all sizes
6255 			 * for an extent type are the same so set the return
6256 			 * value now.
6257 			 */
6258 			*extnt_size = rsrc_blk->rsrc_size;
6259 		}
6260 		curr_blks++;
6261 	}
6262 
6263 	/*
6264 	 * Calculate the size of an embedded mailbox.  The uint32_t
6265 	 * accounts for extents-specific word.
6266 	 */
6267 	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6268 		sizeof(uint32_t);
6269 
6270 	/*
6271 	 * Presume the allocation and response will fit into an embedded
6272 	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6273 	 */
6274 	emb = LPFC_SLI4_MBX_EMBED;
6275 	req_len = emb_len;
6276 	if (req_len > emb_len) {
6277 		req_len = curr_blks * sizeof(uint16_t) +
6278 			sizeof(union lpfc_sli4_cfg_shdr) +
6279 			sizeof(uint32_t);
6280 		emb = LPFC_SLI4_MBX_NEMBED;
6281 	}
6282 
6283 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6284 	if (!mbox)
6285 		return -ENOMEM;
6286 	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
6287 
6288 	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6289 				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
6290 				     req_len, emb);
6291 	if (alloc_len < req_len) {
6292 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6293 			"2983 Allocated DMA memory size (x%x) is "
6294 			"less than the requested DMA memory "
6295 			"size (x%x)\n", alloc_len, req_len);
6296 		rc = -ENOMEM;
6297 		goto err_exit;
6298 	}
6299 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
6300 	if (unlikely(rc)) {
6301 		rc = -EIO;
6302 		goto err_exit;
6303 	}
6304 
6305 	if (!phba->sli4_hba.intr_enable)
6306 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6307 	else {
6308 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6309 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6310 	}
6311 
6312 	if (unlikely(rc)) {
6313 		rc = -EIO;
6314 		goto err_exit;
6315 	}
6316 
6317 	/*
6318 	 * Figure out where the response is located.  Then get local pointers
6319 	 * to the response data.  The port does not guarantee to respond to
6320 	 * all extents counts request so update the local variable with the
6321 	 * allocated count from the port.
6322 	 */
6323 	if (emb == LPFC_SLI4_MBX_EMBED) {
6324 		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6325 		shdr = &rsrc_ext->header.cfg_shdr;
6326 		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6327 	} else {
6328 		virtaddr = mbox->sge_array->addr[0];
6329 		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6330 		shdr = &n_rsrc->cfg_shdr;
6331 		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6332 	}
6333 
6334 	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
6335 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
6336 			"2984 Failed to read allocated resources "
6337 			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
6338 			type,
6339 			bf_get(lpfc_mbox_hdr_status, &shdr->response),
6340 			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
6341 		rc = -EIO;
6342 		goto err_exit;
6343 	}
6344  err_exit:
6345 	lpfc_sli4_mbox_cmd_free(phba, mbox);
6346 	return rc;
6347 }
6348 
6349 /**
6350  * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
6351  * @phba: pointer to lpfc hba data structure.
6352  * @pring: Pointer to driver SLI ring object.
6353  * @sgl_list: linked link of sgl buffers to post
6354  * @cnt: number of linked list buffers
6355  *
6356  * This routine walks the list of buffers that have been allocated and
6357  * repost them to the port by using SGL block post. This is needed after a
6358  * pci_function_reset/warm_start or start. It attempts to construct blocks
6359  * of buffer sgls which contains contiguous xris and uses the non-embedded
6360  * SGL block post mailbox commands to post them to the port. For single
6361  * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
6362  * mailbox command for posting.
6363  *
6364  * Returns: 0 = success, non-zero failure.
6365  **/
6366 static int
6367 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
6368 			  struct list_head *sgl_list, int cnt)
6369 {
6370 	struct lpfc_sglq *sglq_entry = NULL;
6371 	struct lpfc_sglq *sglq_entry_next = NULL;
6372 	struct lpfc_sglq *sglq_entry_first = NULL;
6373 	int status, total_cnt;
6374 	int post_cnt = 0, num_posted = 0, block_cnt = 0;
6375 	int last_xritag = NO_XRI;
6376 	LIST_HEAD(prep_sgl_list);
6377 	LIST_HEAD(blck_sgl_list);
6378 	LIST_HEAD(allc_sgl_list);
6379 	LIST_HEAD(post_sgl_list);
6380 	LIST_HEAD(free_sgl_list);
6381 
6382 	spin_lock_irq(&phba->hbalock);
6383 	spin_lock(&phba->sli4_hba.sgl_list_lock);
6384 	list_splice_init(sgl_list, &allc_sgl_list);
6385 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
6386 	spin_unlock_irq(&phba->hbalock);
6387 
6388 	total_cnt = cnt;
6389 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
6390 				 &allc_sgl_list, list) {
6391 		list_del_init(&sglq_entry->list);
6392 		block_cnt++;
6393 		if ((last_xritag != NO_XRI) &&
6394 		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
6395 			/* a hole in xri block, form a sgl posting block */
6396 			list_splice_init(&prep_sgl_list, &blck_sgl_list);
6397 			post_cnt = block_cnt - 1;
6398 			/* prepare list for next posting block */
6399 			list_add_tail(&sglq_entry->list, &prep_sgl_list);
6400 			block_cnt = 1;
6401 		} else {
6402 			/* prepare list for next posting block */
6403 			list_add_tail(&sglq_entry->list, &prep_sgl_list);
6404 			/* enough sgls for non-embed sgl mbox command */
6405 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
6406 				list_splice_init(&prep_sgl_list,
6407 						 &blck_sgl_list);
6408 				post_cnt = block_cnt;
6409 				block_cnt = 0;
6410 			}
6411 		}
6412 		num_posted++;
6413 
6414 		/* keep track of last sgl's xritag */
6415 		last_xritag = sglq_entry->sli4_xritag;
6416 
6417 		/* end of repost sgl list condition for buffers */
6418 		if (num_posted == total_cnt) {
6419 			if (post_cnt == 0) {
6420 				list_splice_init(&prep_sgl_list,
6421 						 &blck_sgl_list);
6422 				post_cnt = block_cnt;
6423 			} else if (block_cnt == 1) {
6424 				status = lpfc_sli4_post_sgl(phba,
6425 						sglq_entry->phys, 0,
6426 						sglq_entry->sli4_xritag);
6427 				if (!status) {
6428 					/* successful, put sgl to posted list */
6429 					list_add_tail(&sglq_entry->list,
6430 						      &post_sgl_list);
6431 				} else {
6432 					/* Failure, put sgl to free list */
6433 					lpfc_printf_log(phba, KERN_WARNING,
6434 						LOG_SLI,
6435 						"3159 Failed to post "
6436 						"sgl, xritag:x%x\n",
6437 						sglq_entry->sli4_xritag);
6438 					list_add_tail(&sglq_entry->list,
6439 						      &free_sgl_list);
6440 					total_cnt--;
6441 				}
6442 			}
6443 		}
6444 
6445 		/* continue until a nembed page worth of sgls */
6446 		if (post_cnt == 0)
6447 			continue;
6448 
6449 		/* post the buffer list sgls as a block */
6450 		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
6451 						 post_cnt);
6452 
6453 		if (!status) {
6454 			/* success, put sgl list to posted sgl list */
6455 			list_splice_init(&blck_sgl_list, &post_sgl_list);
6456 		} else {
6457 			/* Failure, put sgl list to free sgl list */
6458 			sglq_entry_first = list_first_entry(&blck_sgl_list,
6459 							    struct lpfc_sglq,
6460 							    list);
6461 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6462 					"3160 Failed to post sgl-list, "
6463 					"xritag:x%x-x%x\n",
6464 					sglq_entry_first->sli4_xritag,
6465 					(sglq_entry_first->sli4_xritag +
6466 					 post_cnt - 1));
6467 			list_splice_init(&blck_sgl_list, &free_sgl_list);
6468 			total_cnt -= post_cnt;
6469 		}
6470 
6471 		/* don't reset xirtag due to hole in xri block */
6472 		if (block_cnt == 0)
6473 			last_xritag = NO_XRI;
6474 
6475 		/* reset sgl post count for next round of posting */
6476 		post_cnt = 0;
6477 	}
6478 
6479 	/* free the sgls failed to post */
6480 	lpfc_free_sgl_list(phba, &free_sgl_list);
6481 
6482 	/* push sgls posted to the available list */
6483 	if (!list_empty(&post_sgl_list)) {
6484 		spin_lock_irq(&phba->hbalock);
6485 		spin_lock(&phba->sli4_hba.sgl_list_lock);
6486 		list_splice_init(&post_sgl_list, sgl_list);
6487 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
6488 		spin_unlock_irq(&phba->hbalock);
6489 	} else {
6490 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6491 				"3161 Failure to post sgl to port.\n");
6492 		return -EIO;
6493 	}
6494 
6495 	/* return the number of XRIs actually posted */
6496 	return total_cnt;
6497 }
6498 
6499 void
6500 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
6501 {
6502 	uint32_t len;
6503 
6504 	len = sizeof(struct lpfc_mbx_set_host_data) -
6505 		sizeof(struct lpfc_sli4_cfg_mhdr);
6506 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6507 			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
6508 			 LPFC_SLI4_MBX_EMBED);
6509 
6510 	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
6511 	mbox->u.mqe.un.set_host_data.param_len =
6512 					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
6513 	snprintf(mbox->u.mqe.un.set_host_data.data,
6514 		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
6515 		 "Linux %s v"LPFC_DRIVER_VERSION,
6516 		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
6517 }
6518 
6519 int
6520 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
6521 		    struct lpfc_queue *drq, int count, int idx)
6522 {
6523 	int rc, i;
6524 	struct lpfc_rqe hrqe;
6525 	struct lpfc_rqe drqe;
6526 	struct lpfc_rqb *rqbp;
6527 	struct rqb_dmabuf *rqb_buffer;
6528 	LIST_HEAD(rqb_buf_list);
6529 
6530 	rqbp = hrq->rqbp;
6531 	for (i = 0; i < count; i++) {
6532 		/* IF RQ is already full, don't bother */
6533 		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
6534 			break;
6535 		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
6536 		if (!rqb_buffer)
6537 			break;
6538 		rqb_buffer->hrq = hrq;
6539 		rqb_buffer->drq = drq;
6540 		rqb_buffer->idx = idx;
6541 		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
6542 	}
6543 	while (!list_empty(&rqb_buf_list)) {
6544 		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
6545 				 hbuf.list);
6546 
6547 		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
6548 		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
6549 		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
6550 		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
6551 		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
6552 		if (rc < 0) {
6553 			rqbp->rqb_free_buffer(phba, rqb_buffer);
6554 		} else {
6555 			list_add_tail(&rqb_buffer->hbuf.list,
6556 				      &rqbp->rqb_buffer_list);
6557 			rqbp->buffer_count++;
6558 		}
6559 	}
6560 	return 1;
6561 }
6562 
6563 /**
6564  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
6565  * @phba: Pointer to HBA context object.
6566  *
6567  * This function is the main SLI4 device initialization PCI function. This
6568  * function is called by the HBA initialization code, HBA reset code and
6569  * HBA error attention handler code. Caller is not required to hold any
6570  * locks.
6571  **/
6572 int
6573 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
6574 {
6575 	int rc, i, cnt;
6576 	LPFC_MBOXQ_t *mboxq;
6577 	struct lpfc_mqe *mqe;
6578 	uint8_t *vpd;
6579 	uint32_t vpd_size;
6580 	uint32_t ftr_rsp = 0;
6581 	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
6582 	struct lpfc_vport *vport = phba->pport;
6583 	struct lpfc_dmabuf *mp;
6584 	struct lpfc_rqb *rqbp;
6585 
6586 	/* Perform a PCI function reset to start from clean */
6587 	rc = lpfc_pci_function_reset(phba);
6588 	if (unlikely(rc))
6589 		return -ENODEV;
6590 
6591 	/* Check the HBA Host Status Register for readyness */
6592 	rc = lpfc_sli4_post_status_check(phba);
6593 	if (unlikely(rc))
6594 		return -ENODEV;
6595 	else {
6596 		spin_lock_irq(&phba->hbalock);
6597 		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
6598 		spin_unlock_irq(&phba->hbalock);
6599 	}
6600 
6601 	/*
6602 	 * Allocate a single mailbox container for initializing the
6603 	 * port.
6604 	 */
6605 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6606 	if (!mboxq)
6607 		return -ENOMEM;
6608 
6609 	/* Issue READ_REV to collect vpd and FW information. */
6610 	vpd_size = SLI4_PAGE_SIZE;
6611 	vpd = kzalloc(vpd_size, GFP_KERNEL);
6612 	if (!vpd) {
6613 		rc = -ENOMEM;
6614 		goto out_free_mbox;
6615 	}
6616 
6617 	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
6618 	if (unlikely(rc)) {
6619 		kfree(vpd);
6620 		goto out_free_mbox;
6621 	}
6622 
6623 	mqe = &mboxq->u.mqe;
6624 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
6625 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
6626 		phba->hba_flag |= HBA_FCOE_MODE;
6627 		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
6628 	} else {
6629 		phba->hba_flag &= ~HBA_FCOE_MODE;
6630 	}
6631 
6632 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
6633 		LPFC_DCBX_CEE_MODE)
6634 		phba->hba_flag |= HBA_FIP_SUPPORT;
6635 	else
6636 		phba->hba_flag &= ~HBA_FIP_SUPPORT;
6637 
6638 	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;
6639 
6640 	if (phba->sli_rev != LPFC_SLI_REV4) {
6641 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6642 			"0376 READ_REV Error. SLI Level %d "
6643 			"FCoE enabled %d\n",
6644 			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
6645 		rc = -EIO;
6646 		kfree(vpd);
6647 		goto out_free_mbox;
6648 	}
6649 
6650 	/*
6651 	 * Continue initialization with default values even if driver failed
6652 	 * to read FCoE param config regions, only read parameters if the
6653 	 * board is FCoE
6654 	 */
6655 	if (phba->hba_flag & HBA_FCOE_MODE &&
6656 	    lpfc_sli4_read_fcoe_params(phba))
6657 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
6658 			"2570 Failed to read FCoE parameters\n");
6659 
6660 	/*
6661 	 * Retrieve sli4 device physical port name, failure of doing it
6662 	 * is considered as non-fatal.
6663 	 */
6664 	rc = lpfc_sli4_retrieve_pport_name(phba);
6665 	if (!rc)
6666 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6667 				"3080 Successful retrieving SLI4 device "
6668 				"physical port name: %s.\n", phba->Port);
6669 
6670 	/*
6671 	 * Evaluate the read rev and vpd data. Populate the driver
6672 	 * state with the results. If this routine fails, the failure
6673 	 * is not fatal as the driver will use generic values.
6674 	 */
6675 	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
6676 	if (unlikely(!rc)) {
6677 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6678 				"0377 Error %d parsing vpd. "
6679 				"Using defaults.\n", rc);
6680 		rc = 0;
6681 	}
6682 	kfree(vpd);
6683 
6684 	/* Save information as VPD data */
6685 	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
6686 	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
6687 	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
6688 	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
6689 					 &mqe->un.read_rev);
6690 	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
6691 				       &mqe->un.read_rev);
6692 	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
6693 					    &mqe->un.read_rev);
6694 	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
6695 					   &mqe->un.read_rev);
6696 	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
6697 	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
6698 	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
6699 	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
6700 	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
6701 	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
6702 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
6703 			"(%d):0380 READ_REV Status x%x "
6704 			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
6705 			mboxq->vport ? mboxq->vport->vpi : 0,
6706 			bf_get(lpfc_mqe_status, mqe),
6707 			phba->vpd.rev.opFwName,
6708 			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
6709 			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
6710 
6711 	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3)  */
6712 	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
6713 	if (phba->pport->cfg_lun_queue_depth > rc) {
6714 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6715 				"3362 LUN queue depth changed from %d to %d\n",
6716 				phba->pport->cfg_lun_queue_depth, rc);
6717 		phba->pport->cfg_lun_queue_depth = rc;
6718 	}
6719 
6720 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
6721 	    LPFC_SLI_INTF_IF_TYPE_0) {
6722 		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
6723 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6724 		if (rc == MBX_SUCCESS) {
6725 			phba->hba_flag |= HBA_RECOVERABLE_UE;
6726 			/* Set 1Sec interval to detect UE */
6727 			phba->eratt_poll_interval = 1;
6728 			phba->sli4_hba.ue_to_sr = bf_get(
6729 					lpfc_mbx_set_feature_UESR,
6730 					&mboxq->u.mqe.un.set_feature);
6731 			phba->sli4_hba.ue_to_rp = bf_get(
6732 					lpfc_mbx_set_feature_UERP,
6733 					&mboxq->u.mqe.un.set_feature);
6734 		}
6735 	}
6736 
6737 	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
6738 		/* Enable MDS Diagnostics only if the SLI Port supports it */
6739 		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
6740 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6741 		if (rc != MBX_SUCCESS)
6742 			phba->mds_diags_support = 0;
6743 	}
6744 
6745 	/*
6746 	 * Discover the port's supported feature set and match it against the
6747 	 * hosts requests.
6748 	 */
6749 	lpfc_request_features(phba, mboxq);
6750 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6751 	if (unlikely(rc)) {
6752 		rc = -EIO;
6753 		goto out_free_mbox;
6754 	}
6755 
6756 	/*
6757 	 * The port must support FCP initiator mode as this is the
6758 	 * only mode running in the host.
6759 	 */
6760 	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
6761 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6762 				"0378 No support for fcpi mode.\n");
6763 		ftr_rsp++;
6764 	}
6765 	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
6766 		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
6767 	else
6768 		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
6769 	/*
6770 	 * If the port cannot support the host's requested features
6771 	 * then turn off the global config parameters to disable the
6772 	 * feature in the driver.  This is not a fatal error.
6773 	 */
6774 	phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
6775 	if (phba->cfg_enable_bg) {
6776 		if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
6777 			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
6778 		else
6779 			ftr_rsp++;
6780 	}
6781 
6782 	if (phba->max_vpi && phba->cfg_enable_npiv &&
6783 	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6784 		ftr_rsp++;
6785 
6786 	if (ftr_rsp) {
6787 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6788 				"0379 Feature Mismatch Data: x%08x %08x "
6789 				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
6790 				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
6791 				phba->cfg_enable_npiv, phba->max_vpi);
6792 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
6793 			phba->cfg_enable_bg = 0;
6794 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
6795 			phba->cfg_enable_npiv = 0;
6796 	}
6797 
6798 	/* These SLI3 features are assumed in SLI4 */
6799 	spin_lock_irq(&phba->hbalock);
6800 	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
6801 	spin_unlock_irq(&phba->hbalock);
6802 
6803 	/*
6804 	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
6805 	 * calls depends on these resources to complete port setup.
6806 	 */
6807 	rc = lpfc_sli4_alloc_resource_identifiers(phba);
6808 	if (rc) {
6809 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6810 				"2920 Failed to alloc Resource IDs "
6811 				"rc = x%x\n", rc);
6812 		goto out_free_mbox;
6813 	}
6814 
6815 	lpfc_set_host_data(phba, mboxq);
6816 
6817 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6818 	if (rc) {
6819 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
6820 				"2134 Failed to set host os driver version %x",
6821 				rc);
6822 	}
6823 
6824 	/* Read the port's service parameters. */
6825 	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
6826 	if (rc) {
6827 		phba->link_state = LPFC_HBA_ERROR;
6828 		rc = -ENOMEM;
6829 		goto out_free_mbox;
6830 	}
6831 
6832 	mboxq->vport = vport;
6833 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6834 	mp = (struct lpfc_dmabuf *) mboxq->context1;
6835 	if (rc == MBX_SUCCESS) {
6836 		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
6837 		rc = 0;
6838 	}
6839 
6840 	/*
6841 	 * This memory was allocated by the lpfc_read_sparam routine. Release
6842 	 * it to the mbuf pool.
6843 	 */
6844 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
6845 	kfree(mp);
6846 	mboxq->context1 = NULL;
6847 	if (unlikely(rc)) {
6848 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6849 				"0382 READ_SPARAM command failed "
6850 				"status %d, mbxStatus x%x\n",
6851 				rc, bf_get(lpfc_mqe_status, mqe));
6852 		phba->link_state = LPFC_HBA_ERROR;
6853 		rc = -EIO;
6854 		goto out_free_mbox;
6855 	}
6856 
6857 	lpfc_update_vport_wwn(vport);
6858 
6859 	/* Update the fc_host data structures with new wwn. */
6860 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
6861 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
6862 
6863 	/* Create all the SLI4 queues */
6864 	rc = lpfc_sli4_queue_create(phba);
6865 	if (rc) {
6866 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6867 				"3089 Failed to allocate queues\n");
6868 		rc = -ENODEV;
6869 		goto out_free_mbox;
6870 	}
6871 	/* Set up all the queues to the device */
6872 	rc = lpfc_sli4_queue_setup(phba);
6873 	if (unlikely(rc)) {
6874 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6875 				"0381 Error %d during queue setup.\n ", rc);
6876 		goto out_stop_timers;
6877 	}
6878 	/* Initialize the driver internal SLI layer lists. */
6879 	lpfc_sli4_setup(phba);
6880 	lpfc_sli4_queue_init(phba);
6881 
6882 	/* update host els xri-sgl sizes and mappings */
6883 	rc = lpfc_sli4_els_sgl_update(phba);
6884 	if (unlikely(rc)) {
6885 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6886 				"1400 Failed to update xri-sgl size and "
6887 				"mapping: %d\n", rc);
6888 		goto out_destroy_queue;
6889 	}
6890 
6891 	/* register the els sgl pool to the port */
6892 	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
6893 				       phba->sli4_hba.els_xri_cnt);
6894 	if (unlikely(rc < 0)) {
6895 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6896 				"0582 Error %d during els sgl post "
6897 				"operation\n", rc);
6898 		rc = -ENODEV;
6899 		goto out_destroy_queue;
6900 	}
6901 	phba->sli4_hba.els_xri_cnt = rc;
6902 
6903 	if (phba->nvmet_support) {
6904 		/* update host nvmet xri-sgl sizes and mappings */
6905 		rc = lpfc_sli4_nvmet_sgl_update(phba);
6906 		if (unlikely(rc)) {
6907 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6908 					"6308 Failed to update nvmet-sgl size "
6909 					"and mapping: %d\n", rc);
6910 			goto out_destroy_queue;
6911 		}
6912 
6913 		/* register the nvmet sgl pool to the port */
6914 		rc = lpfc_sli4_repost_sgl_list(
6915 			phba,
6916 			&phba->sli4_hba.lpfc_nvmet_sgl_list,
6917 			phba->sli4_hba.nvmet_xri_cnt);
6918 		if (unlikely(rc < 0)) {
6919 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6920 					"3117 Error %d during nvmet "
6921 					"sgl post\n", rc);
6922 			rc = -ENODEV;
6923 			goto out_destroy_queue;
6924 		}
6925 		phba->sli4_hba.nvmet_xri_cnt = rc;
6926 
6927 		cnt = phba->cfg_iocb_cnt * 1024;
6928 		/* We need 1 iocbq for every SGL, for IO processing */
6929 		cnt += phba->sli4_hba.nvmet_xri_cnt;
6930 	} else {
6931 		/* update host scsi xri-sgl sizes and mappings */
6932 		rc = lpfc_sli4_scsi_sgl_update(phba);
6933 		if (unlikely(rc)) {
6934 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6935 					"6309 Failed to update scsi-sgl size "
6936 					"and mapping: %d\n", rc);
6937 			goto out_destroy_queue;
6938 		}
6939 
6940 		/* update host nvme xri-sgl sizes and mappings */
6941 		rc = lpfc_sli4_nvme_sgl_update(phba);
6942 		if (unlikely(rc)) {
6943 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6944 					"6082 Failed to update nvme-sgl size "
6945 					"and mapping: %d\n", rc);
6946 			goto out_destroy_queue;
6947 		}
6948 
6949 		cnt = phba->cfg_iocb_cnt * 1024;
6950 	}
6951 
6952 	if (!phba->sli.iocbq_lookup) {
6953 		/* Initialize and populate the iocb list per host */
6954 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
6955 				"2821 initialize iocb list %d total %d\n",
6956 				phba->cfg_iocb_cnt, cnt);
6957 		rc = lpfc_init_iocb_list(phba, cnt);
6958 		if (rc) {
6959 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6960 					"1413 Failed to init iocb list.\n");
6961 			goto out_destroy_queue;
6962 		}
6963 	}
6964 
6965 	if (phba->nvmet_support)
6966 		lpfc_nvmet_create_targetport(phba);
6967 
6968 	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
6969 		/* Post initial buffers to all RQs created */
6970 		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
6971 			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
6972 			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
6973 			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
6974 			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
6975 			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
6976 			rqbp->buffer_count = 0;
6977 
6978 			lpfc_post_rq_buffer(
6979 				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
6980 				phba->sli4_hba.nvmet_mrq_data[i],
6981 				LPFC_NVMET_RQE_DEF_COUNT, i);
6982 		}
6983 	}
6984 
6985 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
6986 		/* register the allocated scsi sgl pool to the port */
6987 		rc = lpfc_sli4_repost_scsi_sgl_list(phba);
6988 		if (unlikely(rc)) {
6989 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
6990 					"0383 Error %d during scsi sgl post "
6991 					"operation\n", rc);
6992 			/* Some Scsi buffers were moved to abort scsi list */
6993 			/* A pci function reset will repost them */
6994 			rc = -ENODEV;
6995 			goto out_destroy_queue;
6996 		}
6997 	}
6998 
6999 	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
7000 	    (phba->nvmet_support == 0)) {
7001 
7002 		/* register the allocated nvme sgl pool to the port */
7003 		rc = lpfc_repost_nvme_sgl_list(phba);
7004 		if (unlikely(rc)) {
7005 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7006 					"6116 Error %d during nvme sgl post "
7007 					"operation\n", rc);
7008 			/* Some NVME buffers were moved to abort nvme list */
7009 			/* A pci function reset will repost them */
7010 			rc = -ENODEV;
7011 			goto out_destroy_queue;
7012 		}
7013 	}
7014 
7015 	/* Post the rpi header region to the device. */
7016 	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
7017 	if (unlikely(rc)) {
7018 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7019 				"0393 Error %d during rpi post operation\n",
7020 				rc);
7021 		rc = -ENODEV;
7022 		goto out_destroy_queue;
7023 	}
7024 	lpfc_sli4_node_prep(phba);
7025 
7026 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
7027 		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
7028 			/*
7029 			 * The FC Port needs to register FCFI (index 0)
7030 			 */
7031 			lpfc_reg_fcfi(phba, mboxq);
7032 			mboxq->vport = phba->pport;
7033 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7034 			if (rc != MBX_SUCCESS)
7035 				goto out_unset_queue;
7036 			rc = 0;
7037 			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
7038 						&mboxq->u.mqe.un.reg_fcfi);
7039 		} else {
7040 			/* We are a NVME Target mode with MRQ > 1 */
7041 
7042 			/* First register the FCFI */
7043 			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
7044 			mboxq->vport = phba->pport;
7045 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7046 			if (rc != MBX_SUCCESS)
7047 				goto out_unset_queue;
7048 			rc = 0;
7049 			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
7050 						&mboxq->u.mqe.un.reg_fcfi_mrq);
7051 
7052 			/* Next register the MRQs */
7053 			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
7054 			mboxq->vport = phba->pport;
7055 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7056 			if (rc != MBX_SUCCESS)
7057 				goto out_unset_queue;
7058 			rc = 0;
7059 		}
7060 		/* Check if the port is configured to be disabled */
7061 		lpfc_sli_read_link_ste(phba);
7062 	}
7063 
7064 	/* Arm the CQs and then EQs on device */
7065 	lpfc_sli4_arm_cqeq_intr(phba);
7066 
7067 	/* Indicate device interrupt mode */
7068 	phba->sli4_hba.intr_enable = 1;
7069 
7070 	/* Allow asynchronous mailbox command to go through */
7071 	spin_lock_irq(&phba->hbalock);
7072 	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7073 	spin_unlock_irq(&phba->hbalock);
7074 
7075 	/* Post receive buffers to the device */
7076 	lpfc_sli4_rb_setup(phba);
7077 
7078 	/* Reset HBA FCF states after HBA reset */
7079 	phba->fcf.fcf_flag = 0;
7080 	phba->fcf.current_rec.flag = 0;
7081 
7082 	/* Start the ELS watchdog timer */
7083 	mod_timer(&vport->els_tmofunc,
7084 		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
7085 
7086 	/* Start heart beat timer */
7087 	mod_timer(&phba->hb_tmofunc,
7088 		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
7089 	phba->hb_outstanding = 0;
7090 	phba->last_completion_time = jiffies;
7091 
7092 	/* Start error attention (ERATT) polling timer */
7093 	mod_timer(&phba->eratt_poll,
7094 		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
7095 
7096 	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
7097 	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
7098 		rc = pci_enable_pcie_error_reporting(phba->pcidev);
7099 		if (!rc) {
7100 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7101 					"2829 This device supports "
7102 					"Advanced Error Reporting (AER)\n");
7103 			spin_lock_irq(&phba->hbalock);
7104 			phba->hba_flag |= HBA_AER_ENABLED;
7105 			spin_unlock_irq(&phba->hbalock);
7106 		} else {
7107 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7108 					"2830 This device does not support "
7109 					"Advanced Error Reporting (AER)\n");
7110 			phba->cfg_aer_support = 0;
7111 		}
7112 		rc = 0;
7113 	}
7114 
7115 	/*
7116 	 * The port is ready, set the host's link state to LINK_DOWN
7117 	 * in preparation for link interrupts.
7118 	 */
7119 	spin_lock_irq(&phba->hbalock);
7120 	phba->link_state = LPFC_LINK_DOWN;
7121 	spin_unlock_irq(&phba->hbalock);
7122 	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
7123 	    (phba->hba_flag & LINK_DISABLED)) {
7124 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7125 				"3103 Adapter Link is disabled.\n");
7126 		lpfc_down_link(phba, mboxq);
7127 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
7128 		if (rc != MBX_SUCCESS) {
7129 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
7130 					"3104 Adapter failed to issue "
7131 					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
7132 			goto out_unset_queue;
7133 		}
7134 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
7135 		/* don't perform init_link on SLI4 FC port loopback test */
7136 		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
7137 			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
7138 			if (rc)
7139 				goto out_unset_queue;
7140 		}
7141 	}
7142 	mempool_free(mboxq, phba->mbox_mem_pool);
7143 	return rc;
7144 out_unset_queue:
7145 	/* Unset all the queues set up in this routine when error out */
7146 	lpfc_sli4_queue_unset(phba);
7147 out_destroy_queue:
7148 	lpfc_free_iocb_list(phba);
7149 	lpfc_sli4_queue_destroy(phba);
7150 out_stop_timers:
7151 	lpfc_stop_hba_timers(phba);
7152 out_free_mbox:
7153 	mempool_free(mboxq, phba->mbox_mem_pool);
7154 	return rc;
7155 }
7156 
7157 /**
7158  * lpfc_mbox_timeout - Timeout call back function for mbox timer
7159  * @ptr: context object - pointer to hba structure.
7160  *
7161  * This is the callback function for mailbox timer. The mailbox
7162  * timer is armed when a new mailbox command is issued and the timer
7163  * is deleted when the mailbox complete. The function is called by
7164  * the kernel timer code when a mailbox does not complete within
7165  * expected time. This function wakes up the worker thread to
7166  * process the mailbox timeout and returns. All the processing is
7167  * done by the worker thread function lpfc_mbox_timeout_handler.
7168  **/
7169 void
7170 lpfc_mbox_timeout(unsigned long ptr)
7171 {
7172 	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
7173 	unsigned long iflag;
7174 	uint32_t tmo_posted;
7175 
7176 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
7177 	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
7178 	if (!tmo_posted)
7179 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
7180 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
7181 
7182 	if (!tmo_posted)
7183 		lpfc_worker_wake_up(phba);
7184 	return;
7185 }
7186 
7187 /**
7188  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
7189  *                                    are pending
7190  * @phba: Pointer to HBA context object.
7191  *
7192  * This function checks if any mailbox completions are present on the mailbox
7193  * completion queue.
7194  **/
7195 static bool
7196 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
7197 {
7198 
7199 	uint32_t idx;
7200 	struct lpfc_queue *mcq;
7201 	struct lpfc_mcqe *mcqe;
7202 	bool pending_completions = false;
7203 
7204 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7205 		return false;
7206 
7207 	/* Check for completions on mailbox completion queue */
7208 
7209 	mcq = phba->sli4_hba.mbx_cq;
7210 	idx = mcq->hba_index;
7211 	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
7212 		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
7213 		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
7214 		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
7215 			pending_completions = true;
7216 			break;
7217 		}
7218 		idx = (idx + 1) % mcq->entry_count;
7219 		if (mcq->hba_index == idx)
7220 			break;
7221 	}
7222 	return pending_completions;
7223 
7224 }
7225 
7226 /**
7227  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
7228  *					      that were missed.
7229  * @phba: Pointer to HBA context object.
7230  *
7231  * For sli4, it is possible to miss an interrupt. As such mbox completions
7232  * maybe missed causing erroneous mailbox timeouts to occur. This function
7233  * checks to see if mbox completions are on the mailbox completion queue
7234  * and will process all the completions associated with the eq for the
7235  * mailbox completion queue.
7236  **/
7237 bool
7238 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
7239 {
7240 
7241 	uint32_t eqidx;
7242 	struct lpfc_queue *fpeq = NULL;
7243 	struct lpfc_eqe *eqe;
7244 	bool mbox_pending;
7245 
7246 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
7247 		return false;
7248 
7249 	/* Find the eq associated with the mcq */
7250 
7251 	if (phba->sli4_hba.hba_eq)
7252 		for (eqidx = 0; eqidx < phba->io_channel_irqs; eqidx++)
7253 			if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
7254 			    phba->sli4_hba.mbx_cq->assoc_qid) {
7255 				fpeq = phba->sli4_hba.hba_eq[eqidx];
7256 				break;
7257 			}
7258 	if (!fpeq)
7259 		return false;
7260 
7261 	/* Turn off interrupts from this EQ */
7262 
7263 	lpfc_sli4_eq_clr_intr(fpeq);
7264 
7265 	/* Check to see if a mbox completion is pending */
7266 
7267 	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
7268 
7269 	/*
7270 	 * If a mbox completion is pending, process all the events on EQ
7271 	 * associated with the mbox completion queue (this could include
7272 	 * mailbox commands, async events, els commands, receive queue data
7273 	 * and fcp commands)
7274 	 */
7275 
7276 	if (mbox_pending)
7277 		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
7278 			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
7279 			fpeq->EQ_processed++;
7280 		}
7281 
7282 	/* Always clear and re-arm the EQ */
7283 
7284 	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
7285 
7286 	return mbox_pending;
7287 
7288 }
7289 
7290 /**
7291  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
7292  * @phba: Pointer to HBA context object.
7293  *
7294  * This function is called from worker thread when a mailbox command times out.
7295  * The caller is not required to hold any locks. This function will reset the
7296  * HBA and recover all the pending commands.
7297  **/
7298 void
7299 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
7300 {
7301 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
7302 	MAILBOX_t *mb = NULL;
7303 
7304 	struct lpfc_sli *psli = &phba->sli;
7305 
7306 	/* If the mailbox completed, process the completion and return */
7307 	if (lpfc_sli4_process_missed_mbox_completions(phba))
7308 		return;
7309 
7310 	if (pmbox != NULL)
7311 		mb = &pmbox->u.mb;
7312 	/* Check the pmbox pointer first.  There is a race condition
7313 	 * between the mbox timeout handler getting executed in the
7314 	 * worklist and the mailbox actually completing. When this
7315 	 * race condition occurs, the mbox_active will be NULL.
7316 	 */
7317 	spin_lock_irq(&phba->hbalock);
7318 	if (pmbox == NULL) {
7319 		lpfc_printf_log(phba, KERN_WARNING,
7320 				LOG_MBOX | LOG_SLI,
7321 				"0353 Active Mailbox cleared - mailbox timeout "
7322 				"exiting\n");
7323 		spin_unlock_irq(&phba->hbalock);
7324 		return;
7325 	}
7326 
7327 	/* Mbox cmd <mbxCommand> timeout */
7328 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7329 			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
7330 			mb->mbxCommand,
7331 			phba->pport->port_state,
7332 			phba->sli.sli_flag,
7333 			phba->sli.mbox_active);
7334 	spin_unlock_irq(&phba->hbalock);
7335 
7336 	/* Setting state unknown so lpfc_sli_abort_iocb_ring
7337 	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
7338 	 * it to fail all outstanding SCSI IO.
7339 	 */
7340 	spin_lock_irq(&phba->pport->work_port_lock);
7341 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
7342 	spin_unlock_irq(&phba->pport->work_port_lock);
7343 	spin_lock_irq(&phba->hbalock);
7344 	phba->link_state = LPFC_LINK_UNKNOWN;
7345 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
7346 	spin_unlock_irq(&phba->hbalock);
7347 
7348 	lpfc_sli_abort_fcp_rings(phba);
7349 
7350 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7351 			"0345 Resetting board due to mailbox timeout\n");
7352 
7353 	/* Reset the HBA device */
7354 	lpfc_reset_hba(phba);
7355 }
7356 
7357 /**
7358  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
7359  * @phba: Pointer to HBA context object.
7360  * @pmbox: Pointer to mailbox object.
7361  * @flag: Flag indicating how the mailbox need to be processed.
7362  *
7363  * This function is called by discovery code and HBA management code
7364  * to submit a mailbox command to firmware with SLI-3 interface spec. This
7365  * function gets the hbalock to protect the data structures.
7366  * The mailbox command can be submitted in polling mode, in which case
7367  * this function will wait in a polling loop for the completion of the
7368  * mailbox.
7369  * If the mailbox is submitted in no_wait mode (not polling) the
7370  * function will submit the command and returns immediately without waiting
7371  * for the mailbox completion. The no_wait is supported only when HBA
7372  * is in SLI2/SLI3 mode - interrupts are enabled.
7373  * The SLI interface allows only one mailbox pending at a time. If the
7374  * mailbox is issued in polling mode and there is already a mailbox
7375  * pending, then the function will return an error. If the mailbox is issued
7376  * in NO_WAIT mode and there is a mailbox pending already, the function
7377  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
7378  * The sli layer owns the mailbox object until the completion of mailbox
7379  * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
7380  * return codes the caller owns the mailbox command after the return of
7381  * the function.
7382  **/
7383 static int
7384 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
7385 		       uint32_t flag)
7386 {
7387 	MAILBOX_t *mbx;
7388 	struct lpfc_sli *psli = &phba->sli;
7389 	uint32_t status, evtctr;
7390 	uint32_t ha_copy, hc_copy;
7391 	int i;
7392 	unsigned long timeout;
7393 	unsigned long drvr_flag = 0;
7394 	uint32_t word0, ldata;
7395 	void __iomem *to_slim;
7396 	int processing_queue = 0;
7397 
7398 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
7399 	if (!pmbox) {
7400 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7401 		/* processing mbox queue from intr_handler */
7402 		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7403 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7404 			return MBX_SUCCESS;
7405 		}
7406 		processing_queue = 1;
7407 		pmbox = lpfc_mbox_get(phba);
7408 		if (!pmbox) {
7409 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7410 			return MBX_SUCCESS;
7411 		}
7412 	}
7413 
7414 	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
7415 		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
7416 		if(!pmbox->vport) {
7417 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7418 			lpfc_printf_log(phba, KERN_ERR,
7419 					LOG_MBOX | LOG_VPORT,
7420 					"1806 Mbox x%x failed. No vport\n",
7421 					pmbox->u.mb.mbxCommand);
7422 			dump_stack();
7423 			goto out_not_finished;
7424 		}
7425 	}
7426 
7427 	/* If the PCI channel is in offline state, do not post mbox. */
7428 	if (unlikely(pci_channel_offline(phba->pcidev))) {
7429 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7430 		goto out_not_finished;
7431 	}
7432 
7433 	/* If HBA has a deferred error attention, fail the iocb. */
7434 	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
7435 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7436 		goto out_not_finished;
7437 	}
7438 
7439 	psli = &phba->sli;
7440 
7441 	mbx = &pmbox->u.mb;
7442 	status = MBX_SUCCESS;
7443 
7444 	if (phba->link_state == LPFC_HBA_ERROR) {
7445 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7446 
7447 		/* Mbox command <mbxCommand> cannot issue */
7448 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7449 				"(%d):0311 Mailbox command x%x cannot "
7450 				"issue Data: x%x x%x\n",
7451 				pmbox->vport ? pmbox->vport->vpi : 0,
7452 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7453 		goto out_not_finished;
7454 	}
7455 
7456 	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
7457 		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
7458 			!(hc_copy & HC_MBINT_ENA)) {
7459 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7460 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7461 				"(%d):2528 Mailbox command x%x cannot "
7462 				"issue Data: x%x x%x\n",
7463 				pmbox->vport ? pmbox->vport->vpi : 0,
7464 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
7465 			goto out_not_finished;
7466 		}
7467 	}
7468 
7469 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7470 		/* Polling for a mbox command when another one is already active
7471 		 * is not allowed in SLI. Also, the driver must have established
7472 		 * SLI2 mode to queue and process multiple mbox commands.
7473 		 */
7474 
7475 		if (flag & MBX_POLL) {
7476 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7477 
7478 			/* Mbox command <mbxCommand> cannot issue */
7479 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7480 					"(%d):2529 Mailbox command x%x "
7481 					"cannot issue Data: x%x x%x\n",
7482 					pmbox->vport ? pmbox->vport->vpi : 0,
7483 					pmbox->u.mb.mbxCommand,
7484 					psli->sli_flag, flag);
7485 			goto out_not_finished;
7486 		}
7487 
7488 		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
7489 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7490 			/* Mbox command <mbxCommand> cannot issue */
7491 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7492 					"(%d):2530 Mailbox command x%x "
7493 					"cannot issue Data: x%x x%x\n",
7494 					pmbox->vport ? pmbox->vport->vpi : 0,
7495 					pmbox->u.mb.mbxCommand,
7496 					psli->sli_flag, flag);
7497 			goto out_not_finished;
7498 		}
7499 
7500 		/* Another mailbox command is still being processed, queue this
7501 		 * command to be processed later.
7502 		 */
7503 		lpfc_mbox_put(phba, pmbox);
7504 
7505 		/* Mbox cmd issue - BUSY */
7506 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7507 				"(%d):0308 Mbox cmd issue - BUSY Data: "
7508 				"x%x x%x x%x x%x\n",
7509 				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
7510 				mbx->mbxCommand,
7511 				phba->pport ? phba->pport->port_state : 0xff,
7512 				psli->sli_flag, flag);
7513 
7514 		psli->slistat.mbox_busy++;
7515 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7516 
7517 		if (pmbox->vport) {
7518 			lpfc_debugfs_disc_trc(pmbox->vport,
7519 				LPFC_DISC_TRC_MBOX_VPORT,
7520 				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
7521 				(uint32_t)mbx->mbxCommand,
7522 				mbx->un.varWords[0], mbx->un.varWords[1]);
7523 		}
7524 		else {
7525 			lpfc_debugfs_disc_trc(phba->pport,
7526 				LPFC_DISC_TRC_MBOX,
7527 				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
7528 				(uint32_t)mbx->mbxCommand,
7529 				mbx->un.varWords[0], mbx->un.varWords[1]);
7530 		}
7531 
7532 		return MBX_BUSY;
7533 	}
7534 
7535 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7536 
7537 	/* If we are not polling, we MUST be in SLI2 mode */
7538 	if (flag != MBX_POLL) {
7539 		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
7540 		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
7541 			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7542 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7543 			/* Mbox command <mbxCommand> cannot issue */
7544 			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7545 					"(%d):2531 Mailbox command x%x "
7546 					"cannot issue Data: x%x x%x\n",
7547 					pmbox->vport ? pmbox->vport->vpi : 0,
7548 					pmbox->u.mb.mbxCommand,
7549 					psli->sli_flag, flag);
7550 			goto out_not_finished;
7551 		}
7552 		/* timeout active mbox command */
7553 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7554 					   1000);
7555 		mod_timer(&psli->mbox_tmo, jiffies + timeout);
7556 	}
7557 
7558 	/* Mailbox cmd <cmd> issue */
7559 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
7560 			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
7561 			"x%x\n",
7562 			pmbox->vport ? pmbox->vport->vpi : 0,
7563 			mbx->mbxCommand,
7564 			phba->pport ? phba->pport->port_state : 0xff,
7565 			psli->sli_flag, flag);
7566 
7567 	if (mbx->mbxCommand != MBX_HEARTBEAT) {
7568 		if (pmbox->vport) {
7569 			lpfc_debugfs_disc_trc(pmbox->vport,
7570 				LPFC_DISC_TRC_MBOX_VPORT,
7571 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
7572 				(uint32_t)mbx->mbxCommand,
7573 				mbx->un.varWords[0], mbx->un.varWords[1]);
7574 		}
7575 		else {
7576 			lpfc_debugfs_disc_trc(phba->pport,
7577 				LPFC_DISC_TRC_MBOX,
7578 				"MBOX Send:       cmd:x%x mb:x%x x%x",
7579 				(uint32_t)mbx->mbxCommand,
7580 				mbx->un.varWords[0], mbx->un.varWords[1]);
7581 		}
7582 	}
7583 
7584 	psli->slistat.mbox_cmd++;
7585 	evtctr = psli->slistat.mbox_event;
7586 
7587 	/* next set own bit for the adapter and copy over command word */
7588 	mbx->mbxOwner = OWN_CHIP;
7589 
7590 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7591 		/* Populate mbox extension offset word. */
7592 		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
7593 			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7594 				= (uint8_t *)phba->mbox_ext
7595 				  - (uint8_t *)phba->mbox;
7596 		}
7597 
7598 		/* Copy the mailbox extension data */
7599 		if (pmbox->in_ext_byte_len && pmbox->context2) {
7600 			lpfc_sli_pcimem_bcopy(pmbox->context2,
7601 				(uint8_t *)phba->mbox_ext,
7602 				pmbox->in_ext_byte_len);
7603 		}
7604 		/* Copy command data to host SLIM area */
7605 		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
7606 	} else {
7607 		/* Populate mbox extension offset word. */
7608 		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
7609 			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
7610 				= MAILBOX_HBA_EXT_OFFSET;
7611 
7612 		/* Copy the mailbox extension data */
7613 		if (pmbox->in_ext_byte_len && pmbox->context2)
7614 			lpfc_memcpy_to_slim(phba->MBslimaddr +
7615 				MAILBOX_HBA_EXT_OFFSET,
7616 				pmbox->context2, pmbox->in_ext_byte_len);
7617 
7618 		if (mbx->mbxCommand == MBX_CONFIG_PORT)
7619 			/* copy command data into host mbox for cmpl */
7620 			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
7621 					      MAILBOX_CMD_SIZE);
7622 
7623 		/* First copy mbox command data to HBA SLIM, skip past first
7624 		   word */
7625 		to_slim = phba->MBslimaddr + sizeof (uint32_t);
7626 		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
7627 			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
7628 
7629 		/* Next copy over first word, with mbxOwner set */
7630 		ldata = *((uint32_t *)mbx);
7631 		to_slim = phba->MBslimaddr;
7632 		writel(ldata, to_slim);
7633 		readl(to_slim); /* flush */
7634 
7635 		if (mbx->mbxCommand == MBX_CONFIG_PORT)
7636 			/* switch over to host mailbox */
7637 			psli->sli_flag |= LPFC_SLI_ACTIVE;
7638 	}
7639 
7640 	wmb();
7641 
7642 	switch (flag) {
7643 	case MBX_NOWAIT:
7644 		/* Set up reference to mailbox command */
7645 		psli->mbox_active = pmbox;
7646 		/* Interrupt board to do it */
7647 		writel(CA_MBATT, phba->CAregaddr);
7648 		readl(phba->CAregaddr); /* flush */
7649 		/* Don't wait for it to finish, just return */
7650 		break;
7651 
7652 	case MBX_POLL:
7653 		/* Set up null reference to mailbox command */
7654 		psli->mbox_active = NULL;
7655 		/* Interrupt board to do it */
7656 		writel(CA_MBATT, phba->CAregaddr);
7657 		readl(phba->CAregaddr); /* flush */
7658 
7659 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7660 			/* First read mbox status word */
7661 			word0 = *((uint32_t *)phba->mbox);
7662 			word0 = le32_to_cpu(word0);
7663 		} else {
7664 			/* First read mbox status word */
7665 			if (lpfc_readl(phba->MBslimaddr, &word0)) {
7666 				spin_unlock_irqrestore(&phba->hbalock,
7667 						       drvr_flag);
7668 				goto out_not_finished;
7669 			}
7670 		}
7671 
7672 		/* Read the HBA Host Attention Register */
7673 		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7674 			spin_unlock_irqrestore(&phba->hbalock,
7675 						       drvr_flag);
7676 			goto out_not_finished;
7677 		}
7678 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
7679 							1000) + jiffies;
7680 		i = 0;
7681 		/* Wait for command to complete */
7682 		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
7683 		       (!(ha_copy & HA_MBATT) &&
7684 			(phba->link_state > LPFC_WARM_START))) {
7685 			if (time_after(jiffies, timeout)) {
7686 				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7687 				spin_unlock_irqrestore(&phba->hbalock,
7688 						       drvr_flag);
7689 				goto out_not_finished;
7690 			}
7691 
7692 			/* Check if we took a mbox interrupt while we were
7693 			   polling */
7694 			if (((word0 & OWN_CHIP) != OWN_CHIP)
7695 			    && (evtctr != psli->slistat.mbox_event))
7696 				break;
7697 
7698 			if (i++ > 10) {
7699 				spin_unlock_irqrestore(&phba->hbalock,
7700 						       drvr_flag);
7701 				msleep(1);
7702 				spin_lock_irqsave(&phba->hbalock, drvr_flag);
7703 			}
7704 
7705 			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7706 				/* First copy command data */
7707 				word0 = *((uint32_t *)phba->mbox);
7708 				word0 = le32_to_cpu(word0);
7709 				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
7710 					MAILBOX_t *slimmb;
7711 					uint32_t slimword0;
7712 					/* Check real SLIM for any errors */
7713 					slimword0 = readl(phba->MBslimaddr);
7714 					slimmb = (MAILBOX_t *) & slimword0;
7715 					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
7716 					    && slimmb->mbxStatus) {
7717 						psli->sli_flag &=
7718 						    ~LPFC_SLI_ACTIVE;
7719 						word0 = slimword0;
7720 					}
7721 				}
7722 			} else {
7723 				/* First copy command data */
7724 				word0 = readl(phba->MBslimaddr);
7725 			}
7726 			/* Read the HBA Host Attention Register */
7727 			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
7728 				spin_unlock_irqrestore(&phba->hbalock,
7729 						       drvr_flag);
7730 				goto out_not_finished;
7731 			}
7732 		}
7733 
7734 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
7735 			/* copy results back to user */
7736 			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
7737 						MAILBOX_CMD_SIZE);
7738 			/* Copy the mailbox extension data */
7739 			if (pmbox->out_ext_byte_len && pmbox->context2) {
7740 				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
7741 						      pmbox->context2,
7742 						      pmbox->out_ext_byte_len);
7743 			}
7744 		} else {
7745 			/* First copy command data */
7746 			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
7747 						MAILBOX_CMD_SIZE);
7748 			/* Copy the mailbox extension data */
7749 			if (pmbox->out_ext_byte_len && pmbox->context2) {
7750 				lpfc_memcpy_from_slim(pmbox->context2,
7751 					phba->MBslimaddr +
7752 					MAILBOX_HBA_EXT_OFFSET,
7753 					pmbox->out_ext_byte_len);
7754 			}
7755 		}
7756 
7757 		writel(HA_MBATT, phba->HAregaddr);
7758 		readl(phba->HAregaddr); /* flush */
7759 
7760 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
7761 		status = mbx->mbxStatus;
7762 	}
7763 
7764 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
7765 	return status;
7766 
7767 out_not_finished:
7768 	if (processing_queue) {
7769 		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
7770 		lpfc_mbox_cmpl_put(phba, pmbox);
7771 	}
7772 	return MBX_NOT_FINISHED;
7773 }
7774 
7775 /**
7776  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
7777  * @phba: Pointer to HBA context object.
7778  *
7779  * The function blocks the posting of SLI4 asynchronous mailbox commands from
7780  * the driver internal pending mailbox queue. It will then try to wait out the
7781  * possible outstanding mailbox command before return.
7782  *
7783  * Returns:
7784  * 	0 - the outstanding mailbox command completed; otherwise, the wait for
7785  * 	the outstanding mailbox command timed out.
7786  **/
7787 static int
7788 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
7789 {
7790 	struct lpfc_sli *psli = &phba->sli;
7791 	int rc = 0;
7792 	unsigned long timeout = 0;
7793 
7794 	/* Mark the asynchronous mailbox command posting as blocked */
7795 	spin_lock_irq(&phba->hbalock);
7796 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
7797 	/* Determine how long we might wait for the active mailbox
7798 	 * command to be gracefully completed by firmware.
7799 	 */
7800 	if (phba->sli.mbox_active)
7801 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
7802 						phba->sli.mbox_active) *
7803 						1000) + jiffies;
7804 	spin_unlock_irq(&phba->hbalock);
7805 
7806 	/* Make sure the mailbox is really active */
7807 	if (timeout)
7808 		lpfc_sli4_process_missed_mbox_completions(phba);
7809 
7810 	/* Wait for the outstnading mailbox command to complete */
7811 	while (phba->sli.mbox_active) {
7812 		/* Check active mailbox complete status every 2ms */
7813 		msleep(2);
7814 		if (time_after(jiffies, timeout)) {
7815 			/* Timeout, marked the outstanding cmd not complete */
7816 			rc = 1;
7817 			break;
7818 		}
7819 	}
7820 
7821 	/* Can not cleanly block async mailbox command, fails it */
7822 	if (rc) {
7823 		spin_lock_irq(&phba->hbalock);
7824 		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7825 		spin_unlock_irq(&phba->hbalock);
7826 	}
7827 	return rc;
7828 }
7829 
7830 /**
7831  * lpfc_sli4_async_mbox_unblock - Block posting SLI4 async mailbox command
7832  * @phba: Pointer to HBA context object.
7833  *
7834  * The function unblocks and resume posting of SLI4 asynchronous mailbox
7835  * commands from the driver internal pending mailbox queue. It makes sure
7836  * that there is no outstanding mailbox command before resuming posting
7837  * asynchronous mailbox commands. If, for any reason, there is outstanding
7838  * mailbox command, it will try to wait it out before resuming asynchronous
7839  * mailbox command posting.
7840  **/
7841 static void
7842 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
7843 {
7844 	struct lpfc_sli *psli = &phba->sli;
7845 
7846 	spin_lock_irq(&phba->hbalock);
7847 	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
7848 		/* Asynchronous mailbox posting is not blocked, do nothing */
7849 		spin_unlock_irq(&phba->hbalock);
7850 		return;
7851 	}
7852 
7853 	/* Outstanding synchronous mailbox command is guaranteed to be done,
7854 	 * successful or timeout, after timing-out the outstanding mailbox
7855 	 * command shall always be removed, so just unblock posting async
7856 	 * mailbox command and resume
7857 	 */
7858 	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
7859 	spin_unlock_irq(&phba->hbalock);
7860 
7861 	/* wake up worker thread to post asynchronlous mailbox command */
7862 	lpfc_worker_wake_up(phba);
7863 }
7864 
7865 /**
7866  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
7867  * @phba: Pointer to HBA context object.
7868  * @mboxq: Pointer to mailbox object.
7869  *
7870  * The function waits for the bootstrap mailbox register ready bit from
7871  * port for twice the regular mailbox command timeout value.
7872  *
7873  *      0 - no timeout on waiting for bootstrap mailbox register ready.
7874  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out.
7875  **/
7876 static int
7877 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7878 {
7879 	uint32_t db_ready;
7880 	unsigned long timeout;
7881 	struct lpfc_register bmbx_reg;
7882 
7883 	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
7884 				   * 1000) + jiffies;
7885 
7886 	do {
7887 		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
7888 		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
7889 		if (!db_ready)
7890 			msleep(2);
7891 
7892 		if (time_after(jiffies, timeout))
7893 			return MBXERR_ERROR;
7894 	} while (!db_ready);
7895 
7896 	return 0;
7897 }
7898 
7899 /**
7900  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
7901  * @phba: Pointer to HBA context object.
7902  * @mboxq: Pointer to mailbox object.
7903  *
7904  * The function posts a mailbox to the port.  The mailbox is expected
7905  * to be comletely filled in and ready for the port to operate on it.
7906  * This routine executes a synchronous completion operation on the
7907  * mailbox by polling for its completion.
7908  *
7909  * The caller must not be holding any locks when calling this routine.
7910  *
7911  * Returns:
7912  *	MBX_SUCCESS - mailbox posted successfully
7913  *	Any of the MBX error values.
7914  **/
7915 static int
7916 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
7917 {
7918 	int rc = MBX_SUCCESS;
7919 	unsigned long iflag;
7920 	uint32_t mcqe_status;
7921 	uint32_t mbx_cmnd;
7922 	struct lpfc_sli *psli = &phba->sli;
7923 	struct lpfc_mqe *mb = &mboxq->u.mqe;
7924 	struct lpfc_bmbx_create *mbox_rgn;
7925 	struct dma_address *dma_address;
7926 
7927 	/*
7928 	 * Only one mailbox can be active to the bootstrap mailbox region
7929 	 * at a time and there is no queueing provided.
7930 	 */
7931 	spin_lock_irqsave(&phba->hbalock, iflag);
7932 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
7933 		spin_unlock_irqrestore(&phba->hbalock, iflag);
7934 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
7935 				"(%d):2532 Mailbox command x%x (x%x/x%x) "
7936 				"cannot issue Data: x%x x%x\n",
7937 				mboxq->vport ? mboxq->vport->vpi : 0,
7938 				mboxq->u.mb.mbxCommand,
7939 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
7940 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
7941 				psli->sli_flag, MBX_POLL);
7942 		return MBXERR_ERROR;
7943 	}
7944 	/* The server grabs the token and owns it until release */
7945 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
7946 	phba->sli.mbox_active = mboxq;
7947 	spin_unlock_irqrestore(&phba->hbalock, iflag);
7948 
7949 	/* wait for bootstrap mbox register for readyness */
7950 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7951 	if (rc)
7952 		goto exit;
7953 
7954 	/*
7955 	 * Initialize the bootstrap memory region to avoid stale data areas
7956 	 * in the mailbox post.  Then copy the caller's mailbox contents to
7957 	 * the bmbx mailbox region.
7958 	 */
7959 	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
7960 	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
7961 	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
7962 			      sizeof(struct lpfc_mqe));
7963 
7964 	/* Post the high mailbox dma address to the port and wait for ready. */
7965 	dma_address = &phba->sli4_hba.bmbx.dma_address;
7966 	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
7967 
7968 	/* wait for bootstrap mbox register for hi-address write done */
7969 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7970 	if (rc)
7971 		goto exit;
7972 
7973 	/* Post the low mailbox dma address to the port. */
7974 	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
7975 
7976 	/* wait for bootstrap mbox register for low address write done */
7977 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
7978 	if (rc)
7979 		goto exit;
7980 
7981 	/*
7982 	 * Read the CQ to ensure the mailbox has completed.
7983 	 * If so, update the mailbox status so that the upper layers
7984 	 * can complete the request normally.
7985 	 */
7986 	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
7987 			      sizeof(struct lpfc_mqe));
7988 	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
7989 	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
7990 			      sizeof(struct lpfc_mcqe));
7991 	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
7992 	/*
7993 	 * When the CQE status indicates a failure and the mailbox status
7994 	 * indicates success then copy the CQE status into the mailbox status
7995 	 * (and prefix it with x4000).
7996 	 */
7997 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
7998 		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
7999 			bf_set(lpfc_mqe_status, mb,
8000 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
8001 		rc = MBXERR_ERROR;
8002 	} else
8003 		lpfc_sli4_swap_str(phba, mboxq);
8004 
8005 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8006 			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
8007 			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
8008 			" x%x x%x CQ: x%x x%x x%x x%x\n",
8009 			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8010 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8011 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8012 			bf_get(lpfc_mqe_status, mb),
8013 			mb->un.mb_words[0], mb->un.mb_words[1],
8014 			mb->un.mb_words[2], mb->un.mb_words[3],
8015 			mb->un.mb_words[4], mb->un.mb_words[5],
8016 			mb->un.mb_words[6], mb->un.mb_words[7],
8017 			mb->un.mb_words[8], mb->un.mb_words[9],
8018 			mb->un.mb_words[10], mb->un.mb_words[11],
8019 			mb->un.mb_words[12], mboxq->mcqe.word0,
8020 			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
8021 			mboxq->mcqe.trailer);
8022 exit:
8023 	/* We are holding the token, no needed for lock when release */
8024 	spin_lock_irqsave(&phba->hbalock, iflag);
8025 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8026 	phba->sli.mbox_active = NULL;
8027 	spin_unlock_irqrestore(&phba->hbalock, iflag);
8028 	return rc;
8029 }
8030 
8031 /**
8032  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
8033  * @phba: Pointer to HBA context object.
8034  * @pmbox: Pointer to mailbox object.
8035  * @flag: Flag indicating how the mailbox need to be processed.
8036  *
8037  * This function is called by discovery code and HBA management code to submit
8038  * a mailbox command to firmware with SLI-4 interface spec.
8039  *
8040  * Return codes the caller owns the mailbox command after the return of the
8041  * function.
8042  **/
8043 static int
8044 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
8045 		       uint32_t flag)
8046 {
8047 	struct lpfc_sli *psli = &phba->sli;
8048 	unsigned long iflags;
8049 	int rc;
8050 
8051 	/* dump from issue mailbox command if setup */
8052 	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
8053 
8054 	rc = lpfc_mbox_dev_check(phba);
8055 	if (unlikely(rc)) {
8056 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8057 				"(%d):2544 Mailbox command x%x (x%x/x%x) "
8058 				"cannot issue Data: x%x x%x\n",
8059 				mboxq->vport ? mboxq->vport->vpi : 0,
8060 				mboxq->u.mb.mbxCommand,
8061 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8062 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8063 				psli->sli_flag, flag);
8064 		goto out_not_finished;
8065 	}
8066 
8067 	/* Detect polling mode and jump to a handler */
8068 	if (!phba->sli4_hba.intr_enable) {
8069 		if (flag == MBX_POLL)
8070 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8071 		else
8072 			rc = -EIO;
8073 		if (rc != MBX_SUCCESS)
8074 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8075 					"(%d):2541 Mailbox command x%x "
8076 					"(x%x/x%x) failure: "
8077 					"mqe_sta: x%x mcqe_sta: x%x/x%x "
8078 					"Data: x%x x%x\n,",
8079 					mboxq->vport ? mboxq->vport->vpi : 0,
8080 					mboxq->u.mb.mbxCommand,
8081 					lpfc_sli_config_mbox_subsys_get(phba,
8082 									mboxq),
8083 					lpfc_sli_config_mbox_opcode_get(phba,
8084 									mboxq),
8085 					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8086 					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8087 					bf_get(lpfc_mcqe_ext_status,
8088 					       &mboxq->mcqe),
8089 					psli->sli_flag, flag);
8090 		return rc;
8091 	} else if (flag == MBX_POLL) {
8092 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8093 				"(%d):2542 Try to issue mailbox command "
8094 				"x%x (x%x/x%x) synchronously ahead of async"
8095 				"mailbox command queue: x%x x%x\n",
8096 				mboxq->vport ? mboxq->vport->vpi : 0,
8097 				mboxq->u.mb.mbxCommand,
8098 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8099 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8100 				psli->sli_flag, flag);
8101 		/* Try to block the asynchronous mailbox posting */
8102 		rc = lpfc_sli4_async_mbox_block(phba);
8103 		if (!rc) {
8104 			/* Successfully blocked, now issue sync mbox cmd */
8105 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
8106 			if (rc != MBX_SUCCESS)
8107 				lpfc_printf_log(phba, KERN_WARNING,
8108 					LOG_MBOX | LOG_SLI,
8109 					"(%d):2597 Sync Mailbox command "
8110 					"x%x (x%x/x%x) failure: "
8111 					"mqe_sta: x%x mcqe_sta: x%x/x%x "
8112 					"Data: x%x x%x\n,",
8113 					mboxq->vport ? mboxq->vport->vpi : 0,
8114 					mboxq->u.mb.mbxCommand,
8115 					lpfc_sli_config_mbox_subsys_get(phba,
8116 									mboxq),
8117 					lpfc_sli_config_mbox_opcode_get(phba,
8118 									mboxq),
8119 					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
8120 					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
8121 					bf_get(lpfc_mcqe_ext_status,
8122 					       &mboxq->mcqe),
8123 					psli->sli_flag, flag);
8124 			/* Unblock the async mailbox posting afterward */
8125 			lpfc_sli4_async_mbox_unblock(phba);
8126 		}
8127 		return rc;
8128 	}
8129 
8130 	/* Now, interrupt mode asynchrous mailbox command */
8131 	rc = lpfc_mbox_cmd_check(phba, mboxq);
8132 	if (rc) {
8133 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8134 				"(%d):2543 Mailbox command x%x (x%x/x%x) "
8135 				"cannot issue Data: x%x x%x\n",
8136 				mboxq->vport ? mboxq->vport->vpi : 0,
8137 				mboxq->u.mb.mbxCommand,
8138 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8139 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8140 				psli->sli_flag, flag);
8141 		goto out_not_finished;
8142 	}
8143 
8144 	/* Put the mailbox command to the driver internal FIFO */
8145 	psli->slistat.mbox_busy++;
8146 	spin_lock_irqsave(&phba->hbalock, iflags);
8147 	lpfc_mbox_put(phba, mboxq);
8148 	spin_unlock_irqrestore(&phba->hbalock, iflags);
8149 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8150 			"(%d):0354 Mbox cmd issue - Enqueue Data: "
8151 			"x%x (x%x/x%x) x%x x%x x%x\n",
8152 			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
8153 			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8154 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8155 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8156 			phba->pport->port_state,
8157 			psli->sli_flag, MBX_NOWAIT);
8158 	/* Wake up worker thread to transport mailbox command from head */
8159 	lpfc_worker_wake_up(phba);
8160 
8161 	return MBX_BUSY;
8162 
8163 out_not_finished:
8164 	return MBX_NOT_FINISHED;
8165 }
8166 
8167 /**
8168  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
8169  * @phba: Pointer to HBA context object.
8170  *
8171  * This function is called by worker thread to send a mailbox command to
8172  * SLI4 HBA firmware.
8173  *
8174  **/
8175 int
8176 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
8177 {
8178 	struct lpfc_sli *psli = &phba->sli;
8179 	LPFC_MBOXQ_t *mboxq;
8180 	int rc = MBX_SUCCESS;
8181 	unsigned long iflags;
8182 	struct lpfc_mqe *mqe;
8183 	uint32_t mbx_cmnd;
8184 
8185 	/* Check interrupt mode before post async mailbox command */
8186 	if (unlikely(!phba->sli4_hba.intr_enable))
8187 		return MBX_NOT_FINISHED;
8188 
8189 	/* Check for mailbox command service token */
8190 	spin_lock_irqsave(&phba->hbalock, iflags);
8191 	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
8192 		spin_unlock_irqrestore(&phba->hbalock, iflags);
8193 		return MBX_NOT_FINISHED;
8194 	}
8195 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
8196 		spin_unlock_irqrestore(&phba->hbalock, iflags);
8197 		return MBX_NOT_FINISHED;
8198 	}
8199 	if (unlikely(phba->sli.mbox_active)) {
8200 		spin_unlock_irqrestore(&phba->hbalock, iflags);
8201 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8202 				"0384 There is pending active mailbox cmd\n");
8203 		return MBX_NOT_FINISHED;
8204 	}
8205 	/* Take the mailbox command service token */
8206 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
8207 
8208 	/* Get the next mailbox command from head of queue */
8209 	mboxq = lpfc_mbox_get(phba);
8210 
8211 	/* If no more mailbox command waiting for post, we're done */
8212 	if (!mboxq) {
8213 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8214 		spin_unlock_irqrestore(&phba->hbalock, iflags);
8215 		return MBX_SUCCESS;
8216 	}
8217 	phba->sli.mbox_active = mboxq;
8218 	spin_unlock_irqrestore(&phba->hbalock, iflags);
8219 
8220 	/* Check device readiness for posting mailbox command */
8221 	rc = lpfc_mbox_dev_check(phba);
8222 	if (unlikely(rc))
8223 		/* Driver clean routine will clean up pending mailbox */
8224 		goto out_not_finished;
8225 
8226 	/* Prepare the mbox command to be posted */
8227 	mqe = &mboxq->u.mqe;
8228 	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
8229 
8230 	/* Start timer for the mbox_tmo and log some mailbox post messages */
8231 	mod_timer(&psli->mbox_tmo, (jiffies +
8232 		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
8233 
8234 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8235 			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
8236 			"x%x x%x\n",
8237 			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
8238 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8239 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8240 			phba->pport->port_state, psli->sli_flag);
8241 
8242 	if (mbx_cmnd != MBX_HEARTBEAT) {
8243 		if (mboxq->vport) {
8244 			lpfc_debugfs_disc_trc(mboxq->vport,
8245 				LPFC_DISC_TRC_MBOX_VPORT,
8246 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
8247 				mbx_cmnd, mqe->un.mb_words[0],
8248 				mqe->un.mb_words[1]);
8249 		} else {
8250 			lpfc_debugfs_disc_trc(phba->pport,
8251 				LPFC_DISC_TRC_MBOX,
8252 				"MBOX Send: cmd:x%x mb:x%x x%x",
8253 				mbx_cmnd, mqe->un.mb_words[0],
8254 				mqe->un.mb_words[1]);
8255 		}
8256 	}
8257 	psli->slistat.mbox_cmd++;
8258 
8259 	/* Post the mailbox command to the port */
8260 	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
8261 	if (rc != MBX_SUCCESS) {
8262 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
8263 				"(%d):2533 Mailbox command x%x (x%x/x%x) "
8264 				"cannot issue Data: x%x x%x\n",
8265 				mboxq->vport ? mboxq->vport->vpi : 0,
8266 				mboxq->u.mb.mbxCommand,
8267 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
8268 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
8269 				psli->sli_flag, MBX_NOWAIT);
8270 		goto out_not_finished;
8271 	}
8272 
8273 	return rc;
8274 
8275 out_not_finished:
8276 	spin_lock_irqsave(&phba->hbalock, iflags);
8277 	if (phba->sli.mbox_active) {
8278 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
8279 		__lpfc_mbox_cmpl_put(phba, mboxq);
8280 		/* Release the token */
8281 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
8282 		phba->sli.mbox_active = NULL;
8283 	}
8284 	spin_unlock_irqrestore(&phba->hbalock, iflags);
8285 
8286 	return MBX_NOT_FINISHED;
8287 }
8288 
8289 /**
8290  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
8291  * @phba: Pointer to HBA context object.
8292  * @pmbox: Pointer to mailbox object.
8293  * @flag: Flag indicating how the mailbox need to be processed.
8294  *
8295  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
8296  * the API jump table function pointer from the lpfc_hba struct.
8297  *
8298  * Return codes the caller owns the mailbox command after the return of the
8299  * function.
8300  **/
8301 int
8302 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
8303 {
8304 	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
8305 }
8306 
8307 /**
8308  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
8309  * @phba: The hba struct for which this call is being executed.
8310  * @dev_grp: The HBA PCI-Device group number.
8311  *
8312  * This routine sets up the mbox interface API function jump table in @phba
8313  * struct.
8314  * Returns: 0 - success, -ENODEV - failure.
8315  **/
8316 int
8317 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8318 {
8319 
8320 	switch (dev_grp) {
8321 	case LPFC_PCI_DEV_LP:
8322 		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
8323 		phba->lpfc_sli_handle_slow_ring_event =
8324 				lpfc_sli_handle_slow_ring_event_s3;
8325 		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
8326 		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
8327 		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
8328 		break;
8329 	case LPFC_PCI_DEV_OC:
8330 		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
8331 		phba->lpfc_sli_handle_slow_ring_event =
8332 				lpfc_sli_handle_slow_ring_event_s4;
8333 		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
8334 		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
8335 		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
8336 		break;
8337 	default:
8338 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8339 				"1420 Invalid HBA PCI-device group: 0x%x\n",
8340 				dev_grp);
8341 		return -ENODEV;
8342 		break;
8343 	}
8344 	return 0;
8345 }
8346 
8347 /**
8348  * __lpfc_sli_ringtx_put - Add an iocb to the txq
8349  * @phba: Pointer to HBA context object.
8350  * @pring: Pointer to driver SLI ring object.
8351  * @piocb: Pointer to address of newly added command iocb.
8352  *
8353  * This function is called with hbalock held to add a command
8354  * iocb to the txq when SLI layer cannot submit the command iocb
8355  * to the ring.
8356  **/
8357 void
8358 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8359 		    struct lpfc_iocbq *piocb)
8360 {
8361 	lockdep_assert_held(&phba->hbalock);
8362 	/* Insert the caller's iocb in the txq tail for later processing. */
8363 	list_add_tail(&piocb->list, &pring->txq);
8364 }
8365 
8366 /**
8367  * lpfc_sli_next_iocb - Get the next iocb in the txq
8368  * @phba: Pointer to HBA context object.
8369  * @pring: Pointer to driver SLI ring object.
8370  * @piocb: Pointer to address of newly added command iocb.
8371  *
8372  * This function is called with hbalock held before a new
8373  * iocb is submitted to the firmware. This function checks
8374  * txq to flush the iocbs in txq to Firmware before
8375  * submitting new iocbs to the Firmware.
8376  * If there are iocbs in the txq which need to be submitted
8377  * to firmware, lpfc_sli_next_iocb returns the first element
8378  * of the txq after dequeuing it from txq.
8379  * If there is no iocb in the txq then the function will return
8380  * *piocb and *piocb is set to NULL. Caller needs to check
8381  * *piocb to find if there are more commands in the txq.
8382  **/
8383 static struct lpfc_iocbq *
8384 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
8385 		   struct lpfc_iocbq **piocb)
8386 {
8387 	struct lpfc_iocbq * nextiocb;
8388 
8389 	lockdep_assert_held(&phba->hbalock);
8390 
8391 	nextiocb = lpfc_sli_ringtx_get(phba, pring);
8392 	if (!nextiocb) {
8393 		nextiocb = *piocb;
8394 		*piocb = NULL;
8395 	}
8396 
8397 	return nextiocb;
8398 }
8399 
8400 /**
8401  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
8402  * @phba: Pointer to HBA context object.
8403  * @ring_number: SLI ring number to issue iocb on.
8404  * @piocb: Pointer to command iocb.
8405  * @flag: Flag indicating if this command can be put into txq.
8406  *
8407  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
8408  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
8409  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
8410  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
8411  * this function allows only iocbs for posting buffers. This function finds
8412  * next available slot in the command ring and posts the command to the
8413  * available slot and writes the port attention register to request HBA start
8414  * processing new iocb. If there is no slot available in the ring and
8415  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
8416  * the function returns IOCB_BUSY.
8417  *
8418  * This function is called with hbalock held. The function will return success
8419  * after it successfully submit the iocb to firmware or after adding to the
8420  * txq.
8421  **/
8422 static int
8423 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
8424 		    struct lpfc_iocbq *piocb, uint32_t flag)
8425 {
8426 	struct lpfc_iocbq *nextiocb;
8427 	IOCB_t *iocb;
8428 	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
8429 
8430 	lockdep_assert_held(&phba->hbalock);
8431 
8432 	if (piocb->iocb_cmpl && (!piocb->vport) &&
8433 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
8434 	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
8435 		lpfc_printf_log(phba, KERN_ERR,
8436 				LOG_SLI | LOG_VPORT,
8437 				"1807 IOCB x%x failed. No vport\n",
8438 				piocb->iocb.ulpCommand);
8439 		dump_stack();
8440 		return IOCB_ERROR;
8441 	}
8442 
8443 
8444 	/* If the PCI channel is in offline state, do not post iocbs. */
8445 	if (unlikely(pci_channel_offline(phba->pcidev)))
8446 		return IOCB_ERROR;
8447 
8448 	/* If HBA has a deferred error attention, fail the iocb. */
8449 	if (unlikely(phba->hba_flag & DEFER_ERATT))
8450 		return IOCB_ERROR;
8451 
8452 	/*
8453 	 * We should never get an IOCB if we are in a < LINK_DOWN state
8454 	 */
8455 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
8456 		return IOCB_ERROR;
8457 
8458 	/*
8459 	 * Check to see if we are blocking IOCB processing because of a
8460 	 * outstanding event.
8461 	 */
8462 	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
8463 		goto iocb_busy;
8464 
8465 	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
8466 		/*
8467 		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
8468 		 * can be issued if the link is not up.
8469 		 */
8470 		switch (piocb->iocb.ulpCommand) {
8471 		case CMD_GEN_REQUEST64_CR:
8472 		case CMD_GEN_REQUEST64_CX:
8473 			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
8474 				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
8475 					FC_RCTL_DD_UNSOL_CMD) ||
8476 				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
8477 					MENLO_TRANSPORT_TYPE))
8478 
8479 				goto iocb_busy;
8480 			break;
8481 		case CMD_QUE_RING_BUF_CN:
8482 		case CMD_QUE_RING_BUF64_CN:
8483 			/*
8484 			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
8485 			 * completion, iocb_cmpl MUST be 0.
8486 			 */
8487 			if (piocb->iocb_cmpl)
8488 				piocb->iocb_cmpl = NULL;
8489 			/*FALLTHROUGH*/
8490 		case CMD_CREATE_XRI_CR:
8491 		case CMD_CLOSE_XRI_CN:
8492 		case CMD_CLOSE_XRI_CX:
8493 			break;
8494 		default:
8495 			goto iocb_busy;
8496 		}
8497 
8498 	/*
8499 	 * For FCP commands, we must be in a state where we can process link
8500 	 * attention events.
8501 	 */
8502 	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
8503 			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
8504 		goto iocb_busy;
8505 	}
8506 
8507 	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
8508 	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
8509 		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
8510 
8511 	if (iocb)
8512 		lpfc_sli_update_ring(phba, pring);
8513 	else
8514 		lpfc_sli_update_full_ring(phba, pring);
8515 
8516 	if (!piocb)
8517 		return IOCB_SUCCESS;
8518 
8519 	goto out_busy;
8520 
8521  iocb_busy:
8522 	pring->stats.iocb_cmd_delay++;
8523 
8524  out_busy:
8525 
8526 	if (!(flag & SLI_IOCB_RET_IOCB)) {
8527 		__lpfc_sli_ringtx_put(phba, pring, piocb);
8528 		return IOCB_SUCCESS;
8529 	}
8530 
8531 	return IOCB_BUSY;
8532 }
8533 
8534 /**
8535  * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
8536  * @phba: Pointer to HBA context object.
8537  * @piocb: Pointer to command iocb.
8538  * @sglq: Pointer to the scatter gather queue object.
8539  *
8540  * This routine converts the bpl or bde that is in the IOCB
8541  * to a sgl list for the sli4 hardware. The physical address
8542  * of the bpl/bde is converted back to a virtual address.
8543  * If the IOCB contains a BPL then the list of BDE's is
8544  * converted to sli4_sge's. If the IOCB contains a single
8545  * BDE then it is converted to a single sli_sge.
8546  * The IOCB is still in cpu endianess so the contents of
8547  * the bpl can be used without byte swapping.
8548  *
8549  * Returns valid XRI = Success, NO_XRI = Failure.
8550 **/
8551 static uint16_t
8552 lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
8553 		struct lpfc_sglq *sglq)
8554 {
8555 	uint16_t xritag = NO_XRI;
8556 	struct ulp_bde64 *bpl = NULL;
8557 	struct ulp_bde64 bde;
8558 	struct sli4_sge *sgl  = NULL;
8559 	struct lpfc_dmabuf *dmabuf;
8560 	IOCB_t *icmd;
8561 	int numBdes = 0;
8562 	int i = 0;
8563 	uint32_t offset = 0; /* accumulated offset in the sg request list */
8564 	int inbound = 0; /* number of sg reply entries inbound from firmware */
8565 
8566 	if (!piocbq || !sglq)
8567 		return xritag;
8568 
8569 	sgl  = (struct sli4_sge *)sglq->sgl;
8570 	icmd = &piocbq->iocb;
8571 	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
8572 		return sglq->sli4_xritag;
8573 	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8574 		numBdes = icmd->un.genreq64.bdl.bdeSize /
8575 				sizeof(struct ulp_bde64);
8576 		/* The addrHigh and addrLow fields within the IOCB
8577 		 * have not been byteswapped yet so there is no
8578 		 * need to swap them back.
8579 		 */
8580 		if (piocbq->context3)
8581 			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
8582 		else
8583 			return xritag;
8584 
8585 		bpl  = (struct ulp_bde64 *)dmabuf->virt;
8586 		if (!bpl)
8587 			return xritag;
8588 
8589 		for (i = 0; i < numBdes; i++) {
8590 			/* Should already be byte swapped. */
8591 			sgl->addr_hi = bpl->addrHigh;
8592 			sgl->addr_lo = bpl->addrLow;
8593 
8594 			sgl->word2 = le32_to_cpu(sgl->word2);
8595 			if ((i+1) == numBdes)
8596 				bf_set(lpfc_sli4_sge_last, sgl, 1);
8597 			else
8598 				bf_set(lpfc_sli4_sge_last, sgl, 0);
8599 			/* swap the size field back to the cpu so we
8600 			 * can assign it to the sgl.
8601 			 */
8602 			bde.tus.w = le32_to_cpu(bpl->tus.w);
8603 			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
8604 			/* The offsets in the sgl need to be accumulated
8605 			 * separately for the request and reply lists.
8606 			 * The request is always first, the reply follows.
8607 			 */
8608 			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
8609 				/* add up the reply sg entries */
8610 				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
8611 					inbound++;
8612 				/* first inbound? reset the offset */
8613 				if (inbound == 1)
8614 					offset = 0;
8615 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
8616 				bf_set(lpfc_sli4_sge_type, sgl,
8617 					LPFC_SGE_TYPE_DATA);
8618 				offset += bde.tus.f.bdeSize;
8619 			}
8620 			sgl->word2 = cpu_to_le32(sgl->word2);
8621 			bpl++;
8622 			sgl++;
8623 		}
8624 	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
8625 			/* The addrHigh and addrLow fields of the BDE have not
8626 			 * been byteswapped yet so they need to be swapped
8627 			 * before putting them in the sgl.
8628 			 */
8629 			sgl->addr_hi =
8630 				cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
8631 			sgl->addr_lo =
8632 				cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
8633 			sgl->word2 = le32_to_cpu(sgl->word2);
8634 			bf_set(lpfc_sli4_sge_last, sgl, 1);
8635 			sgl->word2 = cpu_to_le32(sgl->word2);
8636 			sgl->sge_len =
8637 				cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
8638 	}
8639 	return sglq->sli4_xritag;
8640 }
8641 
8642 /**
8643  * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
8644  * @phba: Pointer to HBA context object.
8645  * @piocb: Pointer to command iocb.
8646  * @wqe: Pointer to the work queue entry.
8647  *
8648  * This routine converts the iocb command to its Work Queue Entry
8649  * equivalent. The wqe pointer should not have any fields set when
8650  * this routine is called because it will memcpy over them.
8651  * This routine does not set the CQ_ID or the WQEC bits in the
8652  * wqe.
8653  *
8654  * Returns: 0 = Success, IOCB_ERROR = Failure.
8655  **/
8656 static int
8657 lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
8658 		union lpfc_wqe *wqe)
8659 {
8660 	uint32_t xmit_len = 0, total_len = 0;
8661 	uint8_t ct = 0;
8662 	uint32_t fip;
8663 	uint32_t abort_tag;
8664 	uint8_t command_type = ELS_COMMAND_NON_FIP;
8665 	uint8_t cmnd;
8666 	uint16_t xritag;
8667 	uint16_t abrt_iotag;
8668 	struct lpfc_iocbq *abrtiocbq;
8669 	struct ulp_bde64 *bpl = NULL;
8670 	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
8671 	int numBdes, i;
8672 	struct ulp_bde64 bde;
8673 	struct lpfc_nodelist *ndlp;
8674 	uint32_t *pcmd;
8675 	uint32_t if_type;
8676 
8677 	fip = phba->hba_flag & HBA_FIP_SUPPORT;
8678 	/* The fcp commands will set command type */
8679 	if (iocbq->iocb_flag &  LPFC_IO_FCP)
8680 		command_type = FCP_COMMAND;
8681 	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
8682 		command_type = ELS_COMMAND_FIP;
8683 	else
8684 		command_type = ELS_COMMAND_NON_FIP;
8685 
8686 	if (phba->fcp_embed_io)
8687 		memset(wqe, 0, sizeof(union lpfc_wqe128));
8688 	/* Some of the fields are in the right position already */
8689 	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
8690 	if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) {
8691 		/* The ct field has moved so reset */
8692 		wqe->generic.wqe_com.word7 = 0;
8693 		wqe->generic.wqe_com.word10 = 0;
8694 	}
8695 
8696 	abort_tag = (uint32_t) iocbq->iotag;
8697 	xritag = iocbq->sli4_xritag;
8698 	/* words0-2 bpl convert bde */
8699 	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
8700 		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
8701 				sizeof(struct ulp_bde64);
8702 		bpl  = (struct ulp_bde64 *)
8703 			((struct lpfc_dmabuf *)iocbq->context3)->virt;
8704 		if (!bpl)
8705 			return IOCB_ERROR;
8706 
8707 		/* Should already be byte swapped. */
8708 		wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
8709 		wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
8710 		/* swap the size field back to the cpu so we
8711 		 * can assign it to the sgl.
8712 		 */
8713 		wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
8714 		xmit_len = wqe->generic.bde.tus.f.bdeSize;
8715 		total_len = 0;
8716 		for (i = 0; i < numBdes; i++) {
8717 			bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
8718 			total_len += bde.tus.f.bdeSize;
8719 		}
8720 	} else
8721 		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
8722 
8723 	iocbq->iocb.ulpIoTag = iocbq->iotag;
8724 	cmnd = iocbq->iocb.ulpCommand;
8725 
8726 	switch (iocbq->iocb.ulpCommand) {
8727 	case CMD_ELS_REQUEST64_CR:
8728 		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
8729 			ndlp = iocbq->context_un.ndlp;
8730 		else
8731 			ndlp = (struct lpfc_nodelist *)iocbq->context1;
8732 		if (!iocbq->iocb.ulpLe) {
8733 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8734 				"2007 Only Limited Edition cmd Format"
8735 				" supported 0x%x\n",
8736 				iocbq->iocb.ulpCommand);
8737 			return IOCB_ERROR;
8738 		}
8739 
8740 		wqe->els_req.payload_len = xmit_len;
8741 		/* Els_reguest64 has a TMO */
8742 		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
8743 			iocbq->iocb.ulpTimeout);
8744 		/* Need a VF for word 4 set the vf bit*/
8745 		bf_set(els_req64_vf, &wqe->els_req, 0);
8746 		/* And a VFID for word 12 */
8747 		bf_set(els_req64_vfid, &wqe->els_req, 0);
8748 		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
8749 		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8750 		       iocbq->iocb.ulpContext);
8751 		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
8752 		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
8753 		/* CCP CCPE PV PRI in word10 were set in the memcpy */
8754 		if (command_type == ELS_COMMAND_FIP)
8755 			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
8756 					>> LPFC_FIP_ELS_ID_SHIFT);
8757 		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
8758 					iocbq->context2)->virt);
8759 		if_type = bf_get(lpfc_sli_intf_if_type,
8760 					&phba->sli4_hba.sli_intf);
8761 		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
8762 			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
8763 				*pcmd == ELS_CMD_SCR ||
8764 				*pcmd == ELS_CMD_FDISC ||
8765 				*pcmd == ELS_CMD_LOGO ||
8766 				*pcmd == ELS_CMD_PLOGI)) {
8767 				bf_set(els_req64_sp, &wqe->els_req, 1);
8768 				bf_set(els_req64_sid, &wqe->els_req,
8769 					iocbq->vport->fc_myDID);
8770 				if ((*pcmd == ELS_CMD_FLOGI) &&
8771 					!(phba->fc_topology ==
8772 						LPFC_TOPOLOGY_LOOP))
8773 					bf_set(els_req64_sid, &wqe->els_req, 0);
8774 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
8775 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8776 					phba->vpi_ids[iocbq->vport->vpi]);
8777 			} else if (pcmd && iocbq->context1) {
8778 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
8779 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
8780 					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8781 			}
8782 		}
8783 		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
8784 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
8785 		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
8786 		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
8787 		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
8788 		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
8789 		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
8790 		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
8791 		wqe->els_req.max_response_payload_len = total_len - xmit_len;
8792 		break;
8793 	case CMD_XMIT_SEQUENCE64_CX:
8794 		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
8795 		       iocbq->iocb.un.ulpWord[3]);
8796 		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
8797 		       iocbq->iocb.unsli3.rcvsli3.ox_id);
8798 		/* The entire sequence is transmitted for this IOCB */
8799 		xmit_len = total_len;
8800 		cmnd = CMD_XMIT_SEQUENCE64_CR;
8801 		if (phba->link_flag & LS_LOOPBACK_MODE)
8802 			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
8803 	case CMD_XMIT_SEQUENCE64_CR:
8804 		/* word3 iocb=io_tag32 wqe=reserved */
8805 		wqe->xmit_sequence.rsvd3 = 0;
8806 		/* word4 relative_offset memcpy */
8807 		/* word5 r_ctl/df_ctl memcpy */
8808 		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
8809 		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
8810 		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
8811 		       LPFC_WQE_IOD_WRITE);
8812 		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
8813 		       LPFC_WQE_LENLOC_WORD12);
8814 		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
8815 		wqe->xmit_sequence.xmit_len = xmit_len;
8816 		command_type = OTHER_COMMAND;
8817 		break;
8818 	case CMD_XMIT_BCAST64_CN:
8819 		/* word3 iocb=iotag32 wqe=seq_payload_len */
8820 		wqe->xmit_bcast64.seq_payload_len = xmit_len;
8821 		/* word4 iocb=rsvd wqe=rsvd */
8822 		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
8823 		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
8824 		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
8825 			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
8826 		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
8827 		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
8828 		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
8829 		       LPFC_WQE_LENLOC_WORD3);
8830 		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
8831 		break;
8832 	case CMD_FCP_IWRITE64_CR:
8833 		command_type = FCP_COMMAND_DATA_OUT;
8834 		/* word3 iocb=iotag wqe=payload_offset_len */
8835 		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8836 		bf_set(payload_offset_len, &wqe->fcp_iwrite,
8837 		       xmit_len + sizeof(struct fcp_rsp));
8838 		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
8839 		       0);
8840 		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
8841 		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8842 		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
8843 		       iocbq->iocb.ulpFCP2Rcvy);
8844 		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
8845 		/* Always open the exchange */
8846 		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
8847 		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
8848 		       LPFC_WQE_LENLOC_WORD4);
8849 		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
8850 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
8851 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
8852 			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
8853 			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
8854 			if (iocbq->priority) {
8855 				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8856 				       (iocbq->priority << 1));
8857 			} else {
8858 				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
8859 				       (phba->cfg_XLanePriority << 1));
8860 			}
8861 		}
8862 		/* Note, word 10 is already initialized to 0 */
8863 
8864 		if (phba->fcp_embed_io) {
8865 			struct lpfc_scsi_buf *lpfc_cmd;
8866 			struct sli4_sge *sgl;
8867 			union lpfc_wqe128 *wqe128;
8868 			struct fcp_cmnd *fcp_cmnd;
8869 			uint32_t *ptr;
8870 
8871 			/* 128 byte wqe support here */
8872 			wqe128 = (union lpfc_wqe128 *)wqe;
8873 
8874 			lpfc_cmd = iocbq->context1;
8875 			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8876 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
8877 
8878 			/* Word 0-2 - FCP_CMND */
8879 			wqe128->generic.bde.tus.f.bdeFlags =
8880 				BUFF_TYPE_BDE_IMMED;
8881 			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8882 			wqe128->generic.bde.addrHigh = 0;
8883 			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
8884 
8885 			bf_set(wqe_wqes, &wqe128->fcp_iwrite.wqe_com, 1);
8886 
8887 			/* Word 22-29  FCP CMND Payload */
8888 			ptr = &wqe128->words[22];
8889 			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8890 		}
8891 		break;
8892 	case CMD_FCP_IREAD64_CR:
8893 		/* word3 iocb=iotag wqe=payload_offset_len */
8894 		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8895 		bf_set(payload_offset_len, &wqe->fcp_iread,
8896 		       xmit_len + sizeof(struct fcp_rsp));
8897 		bf_set(cmd_buff_len, &wqe->fcp_iread,
8898 		       0);
8899 		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
8900 		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
8901 		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
8902 		       iocbq->iocb.ulpFCP2Rcvy);
8903 		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
8904 		/* Always open the exchange */
8905 		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
8906 		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
8907 		       LPFC_WQE_LENLOC_WORD4);
8908 		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
8909 		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
8910 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
8911 			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
8912 			bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
8913 			if (iocbq->priority) {
8914 				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8915 				       (iocbq->priority << 1));
8916 			} else {
8917 				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
8918 				       (phba->cfg_XLanePriority << 1));
8919 			}
8920 		}
8921 		/* Note, word 10 is already initialized to 0 */
8922 
8923 		if (phba->fcp_embed_io) {
8924 			struct lpfc_scsi_buf *lpfc_cmd;
8925 			struct sli4_sge *sgl;
8926 			union lpfc_wqe128 *wqe128;
8927 			struct fcp_cmnd *fcp_cmnd;
8928 			uint32_t *ptr;
8929 
8930 			/* 128 byte wqe support here */
8931 			wqe128 = (union lpfc_wqe128 *)wqe;
8932 
8933 			lpfc_cmd = iocbq->context1;
8934 			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8935 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
8936 
8937 			/* Word 0-2 - FCP_CMND */
8938 			wqe128->generic.bde.tus.f.bdeFlags =
8939 				BUFF_TYPE_BDE_IMMED;
8940 			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8941 			wqe128->generic.bde.addrHigh = 0;
8942 			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
8943 
8944 			bf_set(wqe_wqes, &wqe128->fcp_iread.wqe_com, 1);
8945 
8946 			/* Word 22-29  FCP CMND Payload */
8947 			ptr = &wqe128->words[22];
8948 			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
8949 		}
8950 		break;
8951 	case CMD_FCP_ICMND64_CR:
8952 		/* word3 iocb=iotag wqe=payload_offset_len */
8953 		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
8954 		bf_set(payload_offset_len, &wqe->fcp_icmd,
8955 		       xmit_len + sizeof(struct fcp_rsp));
8956 		bf_set(cmd_buff_len, &wqe->fcp_icmd,
8957 		       0);
8958 		/* word3 iocb=IO_TAG wqe=reserved */
8959 		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
8960 		/* Always open the exchange */
8961 		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
8962 		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
8963 		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
8964 		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
8965 		       LPFC_WQE_LENLOC_NONE);
8966 		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
8967 		       iocbq->iocb.ulpFCP2Rcvy);
8968 		if (iocbq->iocb_flag & LPFC_IO_OAS) {
8969 			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
8970 			bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
8971 			if (iocbq->priority) {
8972 				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8973 				       (iocbq->priority << 1));
8974 			} else {
8975 				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
8976 				       (phba->cfg_XLanePriority << 1));
8977 			}
8978 		}
8979 		/* Note, word 10 is already initialized to 0 */
8980 
8981 		if (phba->fcp_embed_io) {
8982 			struct lpfc_scsi_buf *lpfc_cmd;
8983 			struct sli4_sge *sgl;
8984 			union lpfc_wqe128 *wqe128;
8985 			struct fcp_cmnd *fcp_cmnd;
8986 			uint32_t *ptr;
8987 
8988 			/* 128 byte wqe support here */
8989 			wqe128 = (union lpfc_wqe128 *)wqe;
8990 
8991 			lpfc_cmd = iocbq->context1;
8992 			sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
8993 			fcp_cmnd = lpfc_cmd->fcp_cmnd;
8994 
8995 			/* Word 0-2 - FCP_CMND */
8996 			wqe128->generic.bde.tus.f.bdeFlags =
8997 				BUFF_TYPE_BDE_IMMED;
8998 			wqe128->generic.bde.tus.f.bdeSize = sgl->sge_len;
8999 			wqe128->generic.bde.addrHigh = 0;
9000 			wqe128->generic.bde.addrLow =  88;  /* Word 22 */
9001 
9002 			bf_set(wqe_wqes, &wqe128->fcp_icmd.wqe_com, 1);
9003 
9004 			/* Word 22-29  FCP CMND Payload */
9005 			ptr = &wqe128->words[22];
9006 			memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
9007 		}
9008 		break;
9009 	case CMD_GEN_REQUEST64_CR:
9010 		/* For this command calculate the xmit length of the
9011 		 * request bde.
9012 		 */
9013 		xmit_len = 0;
9014 		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
9015 			sizeof(struct ulp_bde64);
9016 		for (i = 0; i < numBdes; i++) {
9017 			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
9018 			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
9019 				break;
9020 			xmit_len += bde.tus.f.bdeSize;
9021 		}
9022 		/* word3 iocb=IO_TAG wqe=request_payload_len */
9023 		wqe->gen_req.request_payload_len = xmit_len;
9024 		/* word4 iocb=parameter wqe=relative_offset memcpy */
9025 		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
9026 		/* word6 context tag copied in memcpy */
9027 		if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
9028 			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
9029 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9030 				"2015 Invalid CT %x command 0x%x\n",
9031 				ct, iocbq->iocb.ulpCommand);
9032 			return IOCB_ERROR;
9033 		}
9034 		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
9035 		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
9036 		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
9037 		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
9038 		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
9039 		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
9040 		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
9041 		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
9042 		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
9043 		command_type = OTHER_COMMAND;
9044 		break;
9045 	case CMD_XMIT_ELS_RSP64_CX:
9046 		ndlp = (struct lpfc_nodelist *)iocbq->context1;
9047 		/* words0-2 BDE memcpy */
9048 		/* word3 iocb=iotag32 wqe=response_payload_len */
9049 		wqe->xmit_els_rsp.response_payload_len = xmit_len;
9050 		/* word4 */
9051 		wqe->xmit_els_rsp.word4 = 0;
9052 		/* word5 iocb=rsvd wge=did */
9053 		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
9054 			 iocbq->iocb.un.xseq64.xmit_els_remoteID);
9055 
9056 		if_type = bf_get(lpfc_sli_intf_if_type,
9057 					&phba->sli4_hba.sli_intf);
9058 		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
9059 			if (iocbq->vport->fc_flag & FC_PT2PT) {
9060 				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9061 				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9062 					iocbq->vport->fc_myDID);
9063 				if (iocbq->vport->fc_myDID == Fabric_DID) {
9064 					bf_set(wqe_els_did,
9065 						&wqe->xmit_els_rsp.wqe_dest, 0);
9066 				}
9067 			}
9068 		}
9069 		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
9070 		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9071 		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
9072 		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
9073 		       iocbq->iocb.unsli3.rcvsli3.ox_id);
9074 		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
9075 			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9076 			       phba->vpi_ids[iocbq->vport->vpi]);
9077 		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
9078 		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
9079 		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
9080 		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
9081 		       LPFC_WQE_LENLOC_WORD3);
9082 		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
9083 		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
9084 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
9085 		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
9086 					iocbq->context2)->virt);
9087 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
9088 				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
9089 				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
9090 					iocbq->vport->fc_myDID);
9091 				bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
9092 				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
9093 					phba->vpi_ids[phba->pport->vpi]);
9094 		}
9095 		command_type = OTHER_COMMAND;
9096 		break;
9097 	case CMD_CLOSE_XRI_CN:
9098 	case CMD_ABORT_XRI_CN:
9099 	case CMD_ABORT_XRI_CX:
9100 		/* words 0-2 memcpy should be 0 rserved */
9101 		/* port will send abts */
9102 		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
9103 		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
9104 			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
9105 			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
9106 		} else
9107 			fip = 0;
9108 
9109 		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
9110 			/*
9111 			 * The link is down, or the command was ELS_FIP
9112 			 * so the fw does not need to send abts
9113 			 * on the wire.
9114 			 */
9115 			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
9116 		else
9117 			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
9118 		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
9119 		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9120 		wqe->abort_cmd.rsrvd5 = 0;
9121 		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
9122 			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
9123 		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
9124 		/*
9125 		 * The abort handler will send us CMD_ABORT_XRI_CN or
9126 		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9127 		 */
9128 		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
9129 		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
9130 		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
9131 		       LPFC_WQE_LENLOC_NONE);
9132 		cmnd = CMD_ABORT_XRI_CX;
9133 		command_type = OTHER_COMMAND;
9134 		xritag = 0;
9135 		break;
9136 	case CMD_XMIT_BLS_RSP64_CX:
9137 		ndlp = (struct lpfc_nodelist *)iocbq->context1;
9138 		/* As BLS ABTS RSP WQE is very different from other WQEs,
9139 		 * we re-construct this WQE here based on information in
9140 		 * iocbq from scratch.
9141 		 */
9142 		memset(wqe, 0, sizeof(union lpfc_wqe));
9143 		/* OX_ID is invariable to who sent ABTS to CT exchange */
9144 		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
9145 		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
9146 		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
9147 		    LPFC_ABTS_UNSOL_INT) {
9148 			/* ABTS sent by initiator to CT exchange, the
9149 			 * RX_ID field will be filled with the newly
9150 			 * allocated responder XRI.
9151 			 */
9152 			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9153 			       iocbq->sli4_xritag);
9154 		} else {
9155 			/* ABTS sent by responder to CT exchange, the
9156 			 * RX_ID field will be filled with the responder
9157 			 * RX_ID from ABTS.
9158 			 */
9159 			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
9160 			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
9161 		}
9162 		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
9163 		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
9164 
9165 		/* Use CT=VPI */
9166 		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
9167 			ndlp->nlp_DID);
9168 		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
9169 			iocbq->iocb.ulpContext);
9170 		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
9171 		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
9172 			phba->vpi_ids[phba->pport->vpi]);
9173 		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
9174 		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
9175 		       LPFC_WQE_LENLOC_NONE);
9176 		/* Overwrite the pre-set comnd type with OTHER_COMMAND */
9177 		command_type = OTHER_COMMAND;
9178 		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
9179 			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
9180 			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
9181 			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
9182 			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
9183 			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
9184 			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
9185 		}
9186 
9187 		break;
9188 	case CMD_SEND_FRAME:
9189 		bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9190 		bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9191 		return 0;
9192 	case CMD_XRI_ABORTED_CX:
9193 	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
9194 	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
9195 	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
9196 	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
9197 	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
9198 	default:
9199 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9200 				"2014 Invalid command 0x%x\n",
9201 				iocbq->iocb.ulpCommand);
9202 		return IOCB_ERROR;
9203 		break;
9204 	}
9205 
9206 	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
9207 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
9208 	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
9209 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
9210 	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
9211 		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
9212 	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
9213 			      LPFC_IO_DIF_INSERT);
9214 	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
9215 	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
9216 	wqe->generic.wqe_com.abort_tag = abort_tag;
9217 	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
9218 	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
9219 	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
9220 	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
9221 	return 0;
9222 }
9223 
9224 /**
9225  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
9226  * @phba: Pointer to HBA context object.
9227  * @ring_number: SLI ring number to issue iocb on.
9228  * @piocb: Pointer to command iocb.
9229  * @flag: Flag indicating if this command can be put into txq.
9230  *
9231  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
9232  * an iocb command to an HBA with SLI-4 interface spec.
9233  *
9234  * This function is called with hbalock held. The function will return success
9235  * after it successfully submit the iocb to firmware or after adding to the
9236  * txq.
9237  **/
9238 static int
9239 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9240 			 struct lpfc_iocbq *piocb, uint32_t flag)
9241 {
9242 	struct lpfc_sglq *sglq;
9243 	union lpfc_wqe *wqe;
9244 	union lpfc_wqe128 wqe128;
9245 	struct lpfc_queue *wq;
9246 	struct lpfc_sli_ring *pring;
9247 
9248 	/* Get the WQ */
9249 	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
9250 	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
9251 		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS)))
9252 			wq = phba->sli4_hba.fcp_wq[piocb->hba_wqidx];
9253 		else
9254 			wq = phba->sli4_hba.oas_wq;
9255 	} else {
9256 		wq = phba->sli4_hba.els_wq;
9257 	}
9258 
9259 	/* Get corresponding ring */
9260 	pring = wq->pring;
9261 
9262 	/*
9263 	 * The WQE can be either 64 or 128 bytes,
9264 	 * so allocate space on the stack assuming the largest.
9265 	 */
9266 	wqe = (union lpfc_wqe *)&wqe128;
9267 
9268 	lockdep_assert_held(&phba->hbalock);
9269 
9270 	if (piocb->sli4_xritag == NO_XRI) {
9271 		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
9272 		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
9273 			sglq = NULL;
9274 		else {
9275 			if (!list_empty(&pring->txq)) {
9276 				if (!(flag & SLI_IOCB_RET_IOCB)) {
9277 					__lpfc_sli_ringtx_put(phba,
9278 						pring, piocb);
9279 					return IOCB_SUCCESS;
9280 				} else {
9281 					return IOCB_BUSY;
9282 				}
9283 			} else {
9284 				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
9285 				if (!sglq) {
9286 					if (!(flag & SLI_IOCB_RET_IOCB)) {
9287 						__lpfc_sli_ringtx_put(phba,
9288 								pring,
9289 								piocb);
9290 						return IOCB_SUCCESS;
9291 					} else
9292 						return IOCB_BUSY;
9293 				}
9294 			}
9295 		}
9296 	} else if (piocb->iocb_flag &  LPFC_IO_FCP)
9297 		/* These IO's already have an XRI and a mapped sgl. */
9298 		sglq = NULL;
9299 	else {
9300 		/*
9301 		 * This is a continuation of a commandi,(CX) so this
9302 		 * sglq is on the active list
9303 		 */
9304 		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
9305 		if (!sglq)
9306 			return IOCB_ERROR;
9307 	}
9308 
9309 	if (sglq) {
9310 		piocb->sli4_lxritag = sglq->sli4_lxritag;
9311 		piocb->sli4_xritag = sglq->sli4_xritag;
9312 		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
9313 			return IOCB_ERROR;
9314 	}
9315 
9316 	if (lpfc_sli4_iocb2wqe(phba, piocb, wqe))
9317 		return IOCB_ERROR;
9318 
9319 	if (lpfc_sli4_wq_put(wq, wqe))
9320 		return IOCB_ERROR;
9321 	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
9322 
9323 	return 0;
9324 }
9325 
9326 /**
9327  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
9328  *
9329  * This routine wraps the actual lockless version for issusing IOCB function
9330  * pointer from the lpfc_hba struct.
9331  *
9332  * Return codes:
9333  * IOCB_ERROR - Error
9334  * IOCB_SUCCESS - Success
9335  * IOCB_BUSY - Busy
9336  **/
9337 int
9338 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9339 		struct lpfc_iocbq *piocb, uint32_t flag)
9340 {
9341 	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9342 }
9343 
9344 /**
9345  * lpfc_sli_api_table_setup - Set up sli api function jump table
9346  * @phba: The hba struct for which this call is being executed.
9347  * @dev_grp: The HBA PCI-Device group number.
9348  *
9349  * This routine sets up the SLI interface API function jump table in @phba
9350  * struct.
9351  * Returns: 0 - success, -ENODEV - failure.
9352  **/
9353 int
9354 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
9355 {
9356 
9357 	switch (dev_grp) {
9358 	case LPFC_PCI_DEV_LP:
9359 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
9360 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
9361 		break;
9362 	case LPFC_PCI_DEV_OC:
9363 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
9364 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
9365 		break;
9366 	default:
9367 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9368 				"1419 Invalid HBA PCI-device group: 0x%x\n",
9369 				dev_grp);
9370 		return -ENODEV;
9371 		break;
9372 	}
9373 	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
9374 	return 0;
9375 }
9376 
9377 /**
9378  * lpfc_sli4_calc_ring - Calculates which ring to use
9379  * @phba: Pointer to HBA context object.
9380  * @piocb: Pointer to command iocb.
9381  *
9382  * For SLI4 only, FCP IO can deferred to one fo many WQs, based on
9383  * hba_wqidx, thus we need to calculate the corresponding ring.
9384  * Since ABORTS must go on the same WQ of the command they are
9385  * aborting, we use command's hba_wqidx.
9386  */
9387 struct lpfc_sli_ring *
9388 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
9389 {
9390 	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
9391 		if (!(phba->cfg_fof) ||
9392 		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
9393 			if (unlikely(!phba->sli4_hba.fcp_wq))
9394 				return NULL;
9395 			/*
9396 			 * for abort iocb hba_wqidx should already
9397 			 * be setup based on what work queue we used.
9398 			 */
9399 			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
9400 				piocb->hba_wqidx =
9401 					lpfc_sli4_scmd_to_wqidx_distr(phba,
9402 							      piocb->context1);
9403 			return phba->sli4_hba.fcp_wq[piocb->hba_wqidx]->pring;
9404 		} else {
9405 			if (unlikely(!phba->sli4_hba.oas_wq))
9406 				return NULL;
9407 			piocb->hba_wqidx = 0;
9408 			return phba->sli4_hba.oas_wq->pring;
9409 		}
9410 	} else {
9411 		if (unlikely(!phba->sli4_hba.els_wq))
9412 			return NULL;
9413 		piocb->hba_wqidx = 0;
9414 		return phba->sli4_hba.els_wq->pring;
9415 	}
9416 }
9417 
9418 /**
9419  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
9420  * @phba: Pointer to HBA context object.
9421  * @pring: Pointer to driver SLI ring object.
9422  * @piocb: Pointer to command iocb.
9423  * @flag: Flag indicating if this command can be put into txq.
9424  *
9425  * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
9426  * function. This function gets the hbalock and calls
9427  * __lpfc_sli_issue_iocb function and will return the error returned
9428  * by __lpfc_sli_issue_iocb function. This wrapper is used by
9429  * functions which do not hold hbalock.
9430  **/
9431 int
9432 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
9433 		    struct lpfc_iocbq *piocb, uint32_t flag)
9434 {
9435 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
9436 	struct lpfc_sli_ring *pring;
9437 	struct lpfc_queue *fpeq;
9438 	struct lpfc_eqe *eqe;
9439 	unsigned long iflags;
9440 	int rc, idx;
9441 
9442 	if (phba->sli_rev == LPFC_SLI_REV4) {
9443 		pring = lpfc_sli4_calc_ring(phba, piocb);
9444 		if (unlikely(pring == NULL))
9445 			return IOCB_ERROR;
9446 
9447 		spin_lock_irqsave(&pring->ring_lock, iflags);
9448 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9449 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
9450 
9451 		if (lpfc_fcp_look_ahead && (piocb->iocb_flag &  LPFC_IO_FCP)) {
9452 			idx = piocb->hba_wqidx;
9453 			hba_eq_hdl = &phba->sli4_hba.hba_eq_hdl[idx];
9454 
9455 			if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use)) {
9456 
9457 				/* Get associated EQ with this index */
9458 				fpeq = phba->sli4_hba.hba_eq[idx];
9459 
9460 				/* Turn off interrupts from this EQ */
9461 				lpfc_sli4_eq_clr_intr(fpeq);
9462 
9463 				/*
9464 				 * Process all the events on FCP EQ
9465 				 */
9466 				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
9467 					lpfc_sli4_hba_handle_eqe(phba,
9468 						eqe, idx);
9469 					fpeq->EQ_processed++;
9470 				}
9471 
9472 				/* Always clear and re-arm the EQ */
9473 				lpfc_sli4_eq_release(fpeq,
9474 					LPFC_QUEUE_REARM);
9475 			}
9476 			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
9477 		}
9478 	} else {
9479 		/* For now, SLI2/3 will still use hbalock */
9480 		spin_lock_irqsave(&phba->hbalock, iflags);
9481 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
9482 		spin_unlock_irqrestore(&phba->hbalock, iflags);
9483 	}
9484 	return rc;
9485 }
9486 
9487 /**
9488  * lpfc_extra_ring_setup - Extra ring setup function
9489  * @phba: Pointer to HBA context object.
9490  *
9491  * This function is called while driver attaches with the
9492  * HBA to setup the extra ring. The extra ring is used
9493  * only when driver needs to support target mode functionality
9494  * or IP over FC functionalities.
9495  *
9496  * This function is called with no lock held. SLI3 only.
9497  **/
9498 static int
9499 lpfc_extra_ring_setup( struct lpfc_hba *phba)
9500 {
9501 	struct lpfc_sli *psli;
9502 	struct lpfc_sli_ring *pring;
9503 
9504 	psli = &phba->sli;
9505 
9506 	/* Adjust cmd/rsp ring iocb entries more evenly */
9507 
9508 	/* Take some away from the FCP ring */
9509 	pring = &psli->sli3_ring[LPFC_FCP_RING];
9510 	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9511 	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9512 	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9513 	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9514 
9515 	/* and give them to the extra ring */
9516 	pring = &psli->sli3_ring[LPFC_EXTRA_RING];
9517 
9518 	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9519 	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9520 	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9521 	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9522 
9523 	/* Setup default profile for this ring */
9524 	pring->iotag_max = 4096;
9525 	pring->num_mask = 1;
9526 	pring->prt[0].profile = 0;      /* Mask 0 */
9527 	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
9528 	pring->prt[0].type = phba->cfg_multi_ring_type;
9529 	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
9530 	return 0;
9531 }
9532 
9533 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
9534  * @phba: Pointer to HBA context object.
9535  * @iocbq: Pointer to iocb object.
9536  *
9537  * The async_event handler calls this routine when it receives
9538  * an ASYNC_STATUS_CN event from the port.  The port generates
9539  * this event when an Abort Sequence request to an rport fails
9540  * twice in succession.  The abort could be originated by the
9541  * driver or by the port.  The ABTS could have been for an ELS
9542  * or FCP IO.  The port only generates this event when an ABTS
9543  * fails to complete after one retry.
9544  */
9545 static void
9546 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
9547 			  struct lpfc_iocbq *iocbq)
9548 {
9549 	struct lpfc_nodelist *ndlp = NULL;
9550 	uint16_t rpi = 0, vpi = 0;
9551 	struct lpfc_vport *vport = NULL;
9552 
9553 	/* The rpi in the ulpContext is vport-sensitive. */
9554 	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
9555 	rpi = iocbq->iocb.ulpContext;
9556 
9557 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9558 			"3092 Port generated ABTS async event "
9559 			"on vpi %d rpi %d status 0x%x\n",
9560 			vpi, rpi, iocbq->iocb.ulpStatus);
9561 
9562 	vport = lpfc_find_vport_by_vpid(phba, vpi);
9563 	if (!vport)
9564 		goto err_exit;
9565 	ndlp = lpfc_findnode_rpi(vport, rpi);
9566 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
9567 		goto err_exit;
9568 
9569 	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
9570 		lpfc_sli_abts_recover_port(vport, ndlp);
9571 	return;
9572 
9573  err_exit:
9574 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9575 			"3095 Event Context not found, no "
9576 			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
9577 			iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
9578 			vpi, rpi);
9579 }
9580 
9581 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
9582  * @phba: pointer to HBA context object.
9583  * @ndlp: nodelist pointer for the impacted rport.
9584  * @axri: pointer to the wcqe containing the failed exchange.
9585  *
9586  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
9587  * port.  The port generates this event when an abort exchange request to an
9588  * rport fails twice in succession with no reply.  The abort could be originated
9589  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
9590  */
9591 void
9592 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
9593 			   struct lpfc_nodelist *ndlp,
9594 			   struct sli4_wcqe_xri_aborted *axri)
9595 {
9596 	struct lpfc_vport *vport;
9597 	uint32_t ext_status = 0;
9598 
9599 	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
9600 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9601 				"3115 Node Context not found, driver "
9602 				"ignoring abts err event\n");
9603 		return;
9604 	}
9605 
9606 	vport = ndlp->vport;
9607 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9608 			"3116 Port generated FCP XRI ABORT event on "
9609 			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
9610 			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
9611 			bf_get(lpfc_wcqe_xa_xri, axri),
9612 			bf_get(lpfc_wcqe_xa_status, axri),
9613 			axri->parameter);
9614 
9615 	/*
9616 	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
9617 	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
9618 	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
9619 	 */
9620 	ext_status = axri->parameter & IOERR_PARAM_MASK;
9621 	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
9622 	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
9623 		lpfc_sli_abts_recover_port(vport, ndlp);
9624 }
9625 
9626 /**
9627  * lpfc_sli_async_event_handler - ASYNC iocb handler function
9628  * @phba: Pointer to HBA context object.
9629  * @pring: Pointer to driver SLI ring object.
9630  * @iocbq: Pointer to iocb object.
9631  *
9632  * This function is called by the slow ring event handler
9633  * function when there is an ASYNC event iocb in the ring.
9634  * This function is called with no lock held.
9635  * Currently this function handles only temperature related
9636  * ASYNC events. The function decodes the temperature sensor
9637  * event message and posts events for the management applications.
9638  **/
9639 static void
9640 lpfc_sli_async_event_handler(struct lpfc_hba * phba,
9641 	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
9642 {
9643 	IOCB_t *icmd;
9644 	uint16_t evt_code;
9645 	struct temp_event temp_event_data;
9646 	struct Scsi_Host *shost;
9647 	uint32_t *iocb_w;
9648 
9649 	icmd = &iocbq->iocb;
9650 	evt_code = icmd->un.asyncstat.evt_code;
9651 
9652 	switch (evt_code) {
9653 	case ASYNC_TEMP_WARN:
9654 	case ASYNC_TEMP_SAFE:
9655 		temp_event_data.data = (uint32_t) icmd->ulpContext;
9656 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
9657 		if (evt_code == ASYNC_TEMP_WARN) {
9658 			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
9659 			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9660 				"0347 Adapter is very hot, please take "
9661 				"corrective action. temperature : %d Celsius\n",
9662 				(uint32_t) icmd->ulpContext);
9663 		} else {
9664 			temp_event_data.event_code = LPFC_NORMAL_TEMP;
9665 			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
9666 				"0340 Adapter temperature is OK now. "
9667 				"temperature : %d Celsius\n",
9668 				(uint32_t) icmd->ulpContext);
9669 		}
9670 
9671 		/* Send temperature change event to applications */
9672 		shost = lpfc_shost_from_vport(phba->pport);
9673 		fc_host_post_vendor_event(shost, fc_get_event_number(),
9674 			sizeof(temp_event_data), (char *) &temp_event_data,
9675 			LPFC_NL_VENDOR_ID);
9676 		break;
9677 	case ASYNC_STATUS_CN:
9678 		lpfc_sli_abts_err_handler(phba, iocbq);
9679 		break;
9680 	default:
9681 		iocb_w = (uint32_t *) icmd;
9682 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
9683 			"0346 Ring %d handler: unexpected ASYNC_STATUS"
9684 			" evt_code 0x%x\n"
9685 			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
9686 			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
9687 			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
9688 			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
9689 			pring->ringno, icmd->un.asyncstat.evt_code,
9690 			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
9691 			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
9692 			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
9693 			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
9694 
9695 		break;
9696 	}
9697 }
9698 
9699 
9700 /**
9701  * lpfc_sli4_setup - SLI ring setup function
9702  * @phba: Pointer to HBA context object.
9703  *
9704  * lpfc_sli_setup sets up rings of the SLI interface with
9705  * number of iocbs per ring and iotags. This function is
9706  * called while driver attach to the HBA and before the
9707  * interrupts are enabled. So there is no need for locking.
9708  *
9709  * This function always returns 0.
9710  **/
9711 int
9712 lpfc_sli4_setup(struct lpfc_hba *phba)
9713 {
9714 	struct lpfc_sli_ring *pring;
9715 
9716 	pring = phba->sli4_hba.els_wq->pring;
9717 	pring->num_mask = LPFC_MAX_RING_MASK;
9718 	pring->prt[0].profile = 0;	/* Mask 0 */
9719 	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9720 	pring->prt[0].type = FC_TYPE_ELS;
9721 	pring->prt[0].lpfc_sli_rcv_unsol_event =
9722 	    lpfc_els_unsol_event;
9723 	pring->prt[1].profile = 0;	/* Mask 1 */
9724 	pring->prt[1].rctl = FC_RCTL_ELS_REP;
9725 	pring->prt[1].type = FC_TYPE_ELS;
9726 	pring->prt[1].lpfc_sli_rcv_unsol_event =
9727 	    lpfc_els_unsol_event;
9728 	pring->prt[2].profile = 0;	/* Mask 2 */
9729 	/* NameServer Inquiry */
9730 	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9731 	/* NameServer */
9732 	pring->prt[2].type = FC_TYPE_CT;
9733 	pring->prt[2].lpfc_sli_rcv_unsol_event =
9734 	    lpfc_ct_unsol_event;
9735 	pring->prt[3].profile = 0;	/* Mask 3 */
9736 	/* NameServer response */
9737 	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9738 	/* NameServer */
9739 	pring->prt[3].type = FC_TYPE_CT;
9740 	pring->prt[3].lpfc_sli_rcv_unsol_event =
9741 	    lpfc_ct_unsol_event;
9742 	return 0;
9743 }
9744 
9745 /**
9746  * lpfc_sli_setup - SLI ring setup function
9747  * @phba: Pointer to HBA context object.
9748  *
9749  * lpfc_sli_setup sets up rings of the SLI interface with
9750  * number of iocbs per ring and iotags. This function is
9751  * called while driver attach to the HBA and before the
9752  * interrupts are enabled. So there is no need for locking.
9753  *
9754  * This function always returns 0. SLI3 only.
9755  **/
9756 int
9757 lpfc_sli_setup(struct lpfc_hba *phba)
9758 {
9759 	int i, totiocbsize = 0;
9760 	struct lpfc_sli *psli = &phba->sli;
9761 	struct lpfc_sli_ring *pring;
9762 
9763 	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
9764 	psli->sli_flag = 0;
9765 
9766 	psli->iocbq_lookup = NULL;
9767 	psli->iocbq_lookup_len = 0;
9768 	psli->last_iotag = 0;
9769 
9770 	for (i = 0; i < psli->num_rings; i++) {
9771 		pring = &psli->sli3_ring[i];
9772 		switch (i) {
9773 		case LPFC_FCP_RING:	/* ring 0 - FCP */
9774 			/* numCiocb and numRiocb are used in config_port */
9775 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
9776 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
9777 			pring->sli.sli3.numCiocb +=
9778 				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
9779 			pring->sli.sli3.numRiocb +=
9780 				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
9781 			pring->sli.sli3.numCiocb +=
9782 				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
9783 			pring->sli.sli3.numRiocb +=
9784 				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
9785 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9786 							SLI3_IOCB_CMD_SIZE :
9787 							SLI2_IOCB_CMD_SIZE;
9788 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9789 							SLI3_IOCB_RSP_SIZE :
9790 							SLI2_IOCB_RSP_SIZE;
9791 			pring->iotag_ctr = 0;
9792 			pring->iotag_max =
9793 			    (phba->cfg_hba_queue_depth * 2);
9794 			pring->fast_iotag = pring->iotag_max;
9795 			pring->num_mask = 0;
9796 			break;
9797 		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
9798 			/* numCiocb and numRiocb are used in config_port */
9799 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
9800 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
9801 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9802 							SLI3_IOCB_CMD_SIZE :
9803 							SLI2_IOCB_CMD_SIZE;
9804 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9805 							SLI3_IOCB_RSP_SIZE :
9806 							SLI2_IOCB_RSP_SIZE;
9807 			pring->iotag_max = phba->cfg_hba_queue_depth;
9808 			pring->num_mask = 0;
9809 			break;
9810 		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
9811 			/* numCiocb and numRiocb are used in config_port */
9812 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
9813 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
9814 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
9815 							SLI3_IOCB_CMD_SIZE :
9816 							SLI2_IOCB_CMD_SIZE;
9817 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
9818 							SLI3_IOCB_RSP_SIZE :
9819 							SLI2_IOCB_RSP_SIZE;
9820 			pring->fast_iotag = 0;
9821 			pring->iotag_ctr = 0;
9822 			pring->iotag_max = 4096;
9823 			pring->lpfc_sli_rcv_async_status =
9824 				lpfc_sli_async_event_handler;
9825 			pring->num_mask = LPFC_MAX_RING_MASK;
9826 			pring->prt[0].profile = 0;	/* Mask 0 */
9827 			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
9828 			pring->prt[0].type = FC_TYPE_ELS;
9829 			pring->prt[0].lpfc_sli_rcv_unsol_event =
9830 			    lpfc_els_unsol_event;
9831 			pring->prt[1].profile = 0;	/* Mask 1 */
9832 			pring->prt[1].rctl = FC_RCTL_ELS_REP;
9833 			pring->prt[1].type = FC_TYPE_ELS;
9834 			pring->prt[1].lpfc_sli_rcv_unsol_event =
9835 			    lpfc_els_unsol_event;
9836 			pring->prt[2].profile = 0;	/* Mask 2 */
9837 			/* NameServer Inquiry */
9838 			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
9839 			/* NameServer */
9840 			pring->prt[2].type = FC_TYPE_CT;
9841 			pring->prt[2].lpfc_sli_rcv_unsol_event =
9842 			    lpfc_ct_unsol_event;
9843 			pring->prt[3].profile = 0;	/* Mask 3 */
9844 			/* NameServer response */
9845 			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
9846 			/* NameServer */
9847 			pring->prt[3].type = FC_TYPE_CT;
9848 			pring->prt[3].lpfc_sli_rcv_unsol_event =
9849 			    lpfc_ct_unsol_event;
9850 			break;
9851 		}
9852 		totiocbsize += (pring->sli.sli3.numCiocb *
9853 			pring->sli.sli3.sizeCiocb) +
9854 			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
9855 	}
9856 	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
9857 		/* Too many cmd / rsp ring entries in SLI2 SLIM */
9858 		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
9859 		       "SLI2 SLIM Data: x%x x%lx\n",
9860 		       phba->brd_no, totiocbsize,
9861 		       (unsigned long) MAX_SLIM_IOCB_SIZE);
9862 	}
9863 	if (phba->cfg_multi_ring_support == 2)
9864 		lpfc_extra_ring_setup(phba);
9865 
9866 	return 0;
9867 }
9868 
9869 /**
9870  * lpfc_sli4_queue_init - Queue initialization function
9871  * @phba: Pointer to HBA context object.
9872  *
9873  * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
9874  * ring. This function also initializes ring indices of each ring.
9875  * This function is called during the initialization of the SLI
9876  * interface of an HBA.
9877  * This function is called with no lock held and always returns
9878  * 1.
9879  **/
9880 void
9881 lpfc_sli4_queue_init(struct lpfc_hba *phba)
9882 {
9883 	struct lpfc_sli *psli;
9884 	struct lpfc_sli_ring *pring;
9885 	int i;
9886 
9887 	psli = &phba->sli;
9888 	spin_lock_irq(&phba->hbalock);
9889 	INIT_LIST_HEAD(&psli->mboxq);
9890 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
9891 	/* Initialize list headers for txq and txcmplq as double linked lists */
9892 	for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
9893 		pring = phba->sli4_hba.fcp_wq[i]->pring;
9894 		pring->flag = 0;
9895 		pring->ringno = LPFC_FCP_RING;
9896 		INIT_LIST_HEAD(&pring->txq);
9897 		INIT_LIST_HEAD(&pring->txcmplq);
9898 		INIT_LIST_HEAD(&pring->iocb_continueq);
9899 		spin_lock_init(&pring->ring_lock);
9900 	}
9901 	for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
9902 		pring = phba->sli4_hba.nvme_wq[i]->pring;
9903 		pring->flag = 0;
9904 		pring->ringno = LPFC_FCP_RING;
9905 		INIT_LIST_HEAD(&pring->txq);
9906 		INIT_LIST_HEAD(&pring->txcmplq);
9907 		INIT_LIST_HEAD(&pring->iocb_continueq);
9908 		spin_lock_init(&pring->ring_lock);
9909 	}
9910 	pring = phba->sli4_hba.els_wq->pring;
9911 	pring->flag = 0;
9912 	pring->ringno = LPFC_ELS_RING;
9913 	INIT_LIST_HEAD(&pring->txq);
9914 	INIT_LIST_HEAD(&pring->txcmplq);
9915 	INIT_LIST_HEAD(&pring->iocb_continueq);
9916 	spin_lock_init(&pring->ring_lock);
9917 
9918 	if (phba->cfg_nvme_io_channel) {
9919 		pring = phba->sli4_hba.nvmels_wq->pring;
9920 		pring->flag = 0;
9921 		pring->ringno = LPFC_ELS_RING;
9922 		INIT_LIST_HEAD(&pring->txq);
9923 		INIT_LIST_HEAD(&pring->txcmplq);
9924 		INIT_LIST_HEAD(&pring->iocb_continueq);
9925 		spin_lock_init(&pring->ring_lock);
9926 	}
9927 
9928 	if (phba->cfg_fof) {
9929 		pring = phba->sli4_hba.oas_wq->pring;
9930 		pring->flag = 0;
9931 		pring->ringno = LPFC_FCP_RING;
9932 		INIT_LIST_HEAD(&pring->txq);
9933 		INIT_LIST_HEAD(&pring->txcmplq);
9934 		INIT_LIST_HEAD(&pring->iocb_continueq);
9935 		spin_lock_init(&pring->ring_lock);
9936 	}
9937 
9938 	spin_unlock_irq(&phba->hbalock);
9939 }
9940 
9941 /**
9942  * lpfc_sli_queue_init - Queue initialization function
9943  * @phba: Pointer to HBA context object.
9944  *
9945  * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
9946  * ring. This function also initializes ring indices of each ring.
9947  * This function is called during the initialization of the SLI
9948  * interface of an HBA.
9949  * This function is called with no lock held and always returns
9950  * 1.
9951  **/
9952 void
9953 lpfc_sli_queue_init(struct lpfc_hba *phba)
9954 {
9955 	struct lpfc_sli *psli;
9956 	struct lpfc_sli_ring *pring;
9957 	int i;
9958 
9959 	psli = &phba->sli;
9960 	spin_lock_irq(&phba->hbalock);
9961 	INIT_LIST_HEAD(&psli->mboxq);
9962 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
9963 	/* Initialize list headers for txq and txcmplq as double linked lists */
9964 	for (i = 0; i < psli->num_rings; i++) {
9965 		pring = &psli->sli3_ring[i];
9966 		pring->ringno = i;
9967 		pring->sli.sli3.next_cmdidx  = 0;
9968 		pring->sli.sli3.local_getidx = 0;
9969 		pring->sli.sli3.cmdidx = 0;
9970 		INIT_LIST_HEAD(&pring->iocb_continueq);
9971 		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
9972 		INIT_LIST_HEAD(&pring->postbufq);
9973 		pring->flag = 0;
9974 		INIT_LIST_HEAD(&pring->txq);
9975 		INIT_LIST_HEAD(&pring->txcmplq);
9976 		spin_lock_init(&pring->ring_lock);
9977 	}
9978 	spin_unlock_irq(&phba->hbalock);
9979 }
9980 
9981 /**
9982  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
9983  * @phba: Pointer to HBA context object.
9984  *
9985  * This routine flushes the mailbox command subsystem. It will unconditionally
9986  * flush all the mailbox commands in the three possible stages in the mailbox
9987  * command sub-system: pending mailbox command queue; the outstanding mailbox
9988  * command; and completed mailbox command queue. It is caller's responsibility
9989  * to make sure that the driver is in the proper state to flush the mailbox
9990  * command sub-system. Namely, the posting of mailbox commands into the
9991  * pending mailbox command queue from the various clients must be stopped;
9992  * either the HBA is in a state that it will never works on the outstanding
9993  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
9994  * mailbox command has been completed.
9995  **/
9996 static void
9997 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
9998 {
9999 	LIST_HEAD(completions);
10000 	struct lpfc_sli *psli = &phba->sli;
10001 	LPFC_MBOXQ_t *pmb;
10002 	unsigned long iflag;
10003 
10004 	/* Flush all the mailbox commands in the mbox system */
10005 	spin_lock_irqsave(&phba->hbalock, iflag);
10006 	/* The pending mailbox command queue */
10007 	list_splice_init(&phba->sli.mboxq, &completions);
10008 	/* The outstanding active mailbox command */
10009 	if (psli->mbox_active) {
10010 		list_add_tail(&psli->mbox_active->list, &completions);
10011 		psli->mbox_active = NULL;
10012 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10013 	}
10014 	/* The completed mailbox command queue */
10015 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
10016 	spin_unlock_irqrestore(&phba->hbalock, iflag);
10017 
10018 	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
10019 	while (!list_empty(&completions)) {
10020 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
10021 		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
10022 		if (pmb->mbox_cmpl)
10023 			pmb->mbox_cmpl(phba, pmb);
10024 	}
10025 }
10026 
10027 /**
10028  * lpfc_sli_host_down - Vport cleanup function
10029  * @vport: Pointer to virtual port object.
10030  *
10031  * lpfc_sli_host_down is called to clean up the resources
10032  * associated with a vport before destroying virtual
10033  * port data structures.
10034  * This function does following operations:
10035  * - Free discovery resources associated with this virtual
10036  *   port.
10037  * - Free iocbs associated with this virtual port in
10038  *   the txq.
10039  * - Send abort for all iocb commands associated with this
10040  *   vport in txcmplq.
10041  *
10042  * This function is called with no lock held and always returns 1.
10043  **/
10044 int
10045 lpfc_sli_host_down(struct lpfc_vport *vport)
10046 {
10047 	LIST_HEAD(completions);
10048 	struct lpfc_hba *phba = vport->phba;
10049 	struct lpfc_sli *psli = &phba->sli;
10050 	struct lpfc_queue *qp = NULL;
10051 	struct lpfc_sli_ring *pring;
10052 	struct lpfc_iocbq *iocb, *next_iocb;
10053 	int i;
10054 	unsigned long flags = 0;
10055 	uint16_t prev_pring_flag;
10056 
10057 	lpfc_cleanup_discovery_resources(vport);
10058 
10059 	spin_lock_irqsave(&phba->hbalock, flags);
10060 
10061 	/*
10062 	 * Error everything on the txq since these iocbs
10063 	 * have not been given to the FW yet.
10064 	 * Also issue ABTS for everything on the txcmplq
10065 	 */
10066 	if (phba->sli_rev != LPFC_SLI_REV4) {
10067 		for (i = 0; i < psli->num_rings; i++) {
10068 			pring = &psli->sli3_ring[i];
10069 			prev_pring_flag = pring->flag;
10070 			/* Only slow rings */
10071 			if (pring->ringno == LPFC_ELS_RING) {
10072 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
10073 				/* Set the lpfc data pending flag */
10074 				set_bit(LPFC_DATA_READY, &phba->data_flags);
10075 			}
10076 			list_for_each_entry_safe(iocb, next_iocb,
10077 						 &pring->txq, list) {
10078 				if (iocb->vport != vport)
10079 					continue;
10080 				list_move_tail(&iocb->list, &completions);
10081 			}
10082 			list_for_each_entry_safe(iocb, next_iocb,
10083 						 &pring->txcmplq, list) {
10084 				if (iocb->vport != vport)
10085 					continue;
10086 				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10087 			}
10088 			pring->flag = prev_pring_flag;
10089 		}
10090 	} else {
10091 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10092 			pring = qp->pring;
10093 			if (!pring)
10094 				continue;
10095 			if (pring == phba->sli4_hba.els_wq->pring) {
10096 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
10097 				/* Set the lpfc data pending flag */
10098 				set_bit(LPFC_DATA_READY, &phba->data_flags);
10099 			}
10100 			prev_pring_flag = pring->flag;
10101 			spin_lock_irq(&pring->ring_lock);
10102 			list_for_each_entry_safe(iocb, next_iocb,
10103 						 &pring->txq, list) {
10104 				if (iocb->vport != vport)
10105 					continue;
10106 				list_move_tail(&iocb->list, &completions);
10107 			}
10108 			spin_unlock_irq(&pring->ring_lock);
10109 			list_for_each_entry_safe(iocb, next_iocb,
10110 						 &pring->txcmplq, list) {
10111 				if (iocb->vport != vport)
10112 					continue;
10113 				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
10114 			}
10115 			pring->flag = prev_pring_flag;
10116 		}
10117 	}
10118 	spin_unlock_irqrestore(&phba->hbalock, flags);
10119 
10120 	/* Cancel all the IOCBs from the completions list */
10121 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10122 			      IOERR_SLI_DOWN);
10123 	return 1;
10124 }
10125 
10126 /**
10127  * lpfc_sli_hba_down - Resource cleanup function for the HBA
10128  * @phba: Pointer to HBA context object.
10129  *
10130  * This function cleans up all iocb, buffers, mailbox commands
10131  * while shutting down the HBA. This function is called with no
10132  * lock held and always returns 1.
10133  * This function does the following to cleanup driver resources:
10134  * - Free discovery resources for each virtual port
10135  * - Cleanup any pending fabric iocbs
10136  * - Iterate through the iocb txq and free each entry
10137  *   in the list.
10138  * - Free up any buffer posted to the HBA
10139  * - Free mailbox commands in the mailbox queue.
10140  **/
10141 int
10142 lpfc_sli_hba_down(struct lpfc_hba *phba)
10143 {
10144 	LIST_HEAD(completions);
10145 	struct lpfc_sli *psli = &phba->sli;
10146 	struct lpfc_queue *qp = NULL;
10147 	struct lpfc_sli_ring *pring;
10148 	struct lpfc_dmabuf *buf_ptr;
10149 	unsigned long flags = 0;
10150 	int i;
10151 
10152 	/* Shutdown the mailbox command sub-system */
10153 	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
10154 
10155 	lpfc_hba_down_prep(phba);
10156 
10157 	lpfc_fabric_abort_hba(phba);
10158 
10159 	spin_lock_irqsave(&phba->hbalock, flags);
10160 
10161 	/*
10162 	 * Error everything on the txq since these iocbs
10163 	 * have not been given to the FW yet.
10164 	 */
10165 	if (phba->sli_rev != LPFC_SLI_REV4) {
10166 		for (i = 0; i < psli->num_rings; i++) {
10167 			pring = &psli->sli3_ring[i];
10168 			/* Only slow rings */
10169 			if (pring->ringno == LPFC_ELS_RING) {
10170 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
10171 				/* Set the lpfc data pending flag */
10172 				set_bit(LPFC_DATA_READY, &phba->data_flags);
10173 			}
10174 			list_splice_init(&pring->txq, &completions);
10175 		}
10176 	} else {
10177 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10178 			pring = qp->pring;
10179 			if (!pring)
10180 				continue;
10181 			spin_lock_irq(&pring->ring_lock);
10182 			list_splice_init(&pring->txq, &completions);
10183 			spin_unlock_irq(&pring->ring_lock);
10184 			if (pring == phba->sli4_hba.els_wq->pring) {
10185 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
10186 				/* Set the lpfc data pending flag */
10187 				set_bit(LPFC_DATA_READY, &phba->data_flags);
10188 			}
10189 		}
10190 	}
10191 	spin_unlock_irqrestore(&phba->hbalock, flags);
10192 
10193 	/* Cancel all the IOCBs from the completions list */
10194 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
10195 			      IOERR_SLI_DOWN);
10196 
10197 	spin_lock_irqsave(&phba->hbalock, flags);
10198 	list_splice_init(&phba->elsbuf, &completions);
10199 	phba->elsbuf_cnt = 0;
10200 	phba->elsbuf_prev_cnt = 0;
10201 	spin_unlock_irqrestore(&phba->hbalock, flags);
10202 
10203 	while (!list_empty(&completions)) {
10204 		list_remove_head(&completions, buf_ptr,
10205 			struct lpfc_dmabuf, list);
10206 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
10207 		kfree(buf_ptr);
10208 	}
10209 
10210 	/* Return any active mbox cmds */
10211 	del_timer_sync(&psli->mbox_tmo);
10212 
10213 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
10214 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
10215 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
10216 
10217 	return 1;
10218 }
10219 
10220 /**
10221  * lpfc_sli_pcimem_bcopy - SLI memory copy function
10222  * @srcp: Source memory pointer.
10223  * @destp: Destination memory pointer.
10224  * @cnt: Number of words required to be copied.
10225  *
10226  * This function is used for copying data between driver memory
10227  * and the SLI memory. This function also changes the endianness
10228  * of each word if native endianness is different from SLI
10229  * endianness. This function can be called with or without
10230  * lock.
10231  **/
10232 void
10233 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
10234 {
10235 	uint32_t *src = srcp;
10236 	uint32_t *dest = destp;
10237 	uint32_t ldata;
10238 	int i;
10239 
10240 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
10241 		ldata = *src;
10242 		ldata = le32_to_cpu(ldata);
10243 		*dest = ldata;
10244 		src++;
10245 		dest++;
10246 	}
10247 }
10248 
10249 
10250 /**
10251  * lpfc_sli_bemem_bcopy - SLI memory copy function
10252  * @srcp: Source memory pointer.
10253  * @destp: Destination memory pointer.
10254  * @cnt: Number of words required to be copied.
10255  *
10256  * This function is used for copying data between a data structure
10257  * with big endian representation to local endianness.
10258  * This function can be called with or without lock.
10259  **/
10260 void
10261 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
10262 {
10263 	uint32_t *src = srcp;
10264 	uint32_t *dest = destp;
10265 	uint32_t ldata;
10266 	int i;
10267 
10268 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
10269 		ldata = *src;
10270 		ldata = be32_to_cpu(ldata);
10271 		*dest = ldata;
10272 		src++;
10273 		dest++;
10274 	}
10275 }
10276 
10277 /**
10278  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
10279  * @phba: Pointer to HBA context object.
10280  * @pring: Pointer to driver SLI ring object.
10281  * @mp: Pointer to driver buffer object.
10282  *
10283  * This function is called with no lock held.
10284  * It always return zero after adding the buffer to the postbufq
10285  * buffer list.
10286  **/
10287 int
10288 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10289 			 struct lpfc_dmabuf *mp)
10290 {
10291 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
10292 	   later */
10293 	spin_lock_irq(&phba->hbalock);
10294 	list_add_tail(&mp->list, &pring->postbufq);
10295 	pring->postbufq_cnt++;
10296 	spin_unlock_irq(&phba->hbalock);
10297 	return 0;
10298 }
10299 
10300 /**
10301  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
10302  * @phba: Pointer to HBA context object.
10303  *
10304  * When HBQ is enabled, buffers are searched based on tags. This function
10305  * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
10306  * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
10307  * does not conflict with tags of buffer posted for unsolicited events.
10308  * The function returns the allocated tag. The function is called with
10309  * no locks held.
10310  **/
10311 uint32_t
10312 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
10313 {
10314 	spin_lock_irq(&phba->hbalock);
10315 	phba->buffer_tag_count++;
10316 	/*
10317 	 * Always set the QUE_BUFTAG_BIT to distiguish between
10318 	 * a tag assigned by HBQ.
10319 	 */
10320 	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
10321 	spin_unlock_irq(&phba->hbalock);
10322 	return phba->buffer_tag_count;
10323 }
10324 
10325 /**
10326  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
10327  * @phba: Pointer to HBA context object.
10328  * @pring: Pointer to driver SLI ring object.
10329  * @tag: Buffer tag.
10330  *
10331  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
10332  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
10333  * iocb is posted to the response ring with the tag of the buffer.
10334  * This function searches the pring->postbufq list using the tag
10335  * to find buffer associated with CMD_IOCB_RET_XRI64_CX
10336  * iocb. If the buffer is found then lpfc_dmabuf object of the
10337  * buffer is returned to the caller else NULL is returned.
10338  * This function is called with no lock held.
10339  **/
10340 struct lpfc_dmabuf *
10341 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10342 			uint32_t tag)
10343 {
10344 	struct lpfc_dmabuf *mp, *next_mp;
10345 	struct list_head *slp = &pring->postbufq;
10346 
10347 	/* Search postbufq, from the beginning, looking for a match on tag */
10348 	spin_lock_irq(&phba->hbalock);
10349 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10350 		if (mp->buffer_tag == tag) {
10351 			list_del_init(&mp->list);
10352 			pring->postbufq_cnt--;
10353 			spin_unlock_irq(&phba->hbalock);
10354 			return mp;
10355 		}
10356 	}
10357 
10358 	spin_unlock_irq(&phba->hbalock);
10359 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10360 			"0402 Cannot find virtual addr for buffer tag on "
10361 			"ring %d Data x%lx x%p x%p x%x\n",
10362 			pring->ringno, (unsigned long) tag,
10363 			slp->next, slp->prev, pring->postbufq_cnt);
10364 
10365 	return NULL;
10366 }
10367 
10368 /**
10369  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
10370  * @phba: Pointer to HBA context object.
10371  * @pring: Pointer to driver SLI ring object.
10372  * @phys: DMA address of the buffer.
10373  *
10374  * This function searches the buffer list using the dma_address
10375  * of unsolicited event to find the driver's lpfc_dmabuf object
10376  * corresponding to the dma_address. The function returns the
10377  * lpfc_dmabuf object if a buffer is found else it returns NULL.
10378  * This function is called by the ct and els unsolicited event
10379  * handlers to get the buffer associated with the unsolicited
10380  * event.
10381  *
10382  * This function is called with no lock held.
10383  **/
10384 struct lpfc_dmabuf *
10385 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10386 			 dma_addr_t phys)
10387 {
10388 	struct lpfc_dmabuf *mp, *next_mp;
10389 	struct list_head *slp = &pring->postbufq;
10390 
10391 	/* Search postbufq, from the beginning, looking for a match on phys */
10392 	spin_lock_irq(&phba->hbalock);
10393 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
10394 		if (mp->phys == phys) {
10395 			list_del_init(&mp->list);
10396 			pring->postbufq_cnt--;
10397 			spin_unlock_irq(&phba->hbalock);
10398 			return mp;
10399 		}
10400 	}
10401 
10402 	spin_unlock_irq(&phba->hbalock);
10403 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10404 			"0410 Cannot find virtual addr for mapped buf on "
10405 			"ring %d Data x%llx x%p x%p x%x\n",
10406 			pring->ringno, (unsigned long long)phys,
10407 			slp->next, slp->prev, pring->postbufq_cnt);
10408 	return NULL;
10409 }
10410 
10411 /**
10412  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
10413  * @phba: Pointer to HBA context object.
10414  * @cmdiocb: Pointer to driver command iocb object.
10415  * @rspiocb: Pointer to driver response iocb object.
10416  *
10417  * This function is the completion handler for the abort iocbs for
10418  * ELS commands. This function is called from the ELS ring event
10419  * handler with no lock held. This function frees memory resources
10420  * associated with the abort iocb.
10421  **/
10422 static void
10423 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10424 			struct lpfc_iocbq *rspiocb)
10425 {
10426 	IOCB_t *irsp = &rspiocb->iocb;
10427 	uint16_t abort_iotag, abort_context;
10428 	struct lpfc_iocbq *abort_iocb = NULL;
10429 
10430 	if (irsp->ulpStatus) {
10431 
10432 		/*
10433 		 * Assume that the port already completed and returned, or
10434 		 * will return the iocb. Just Log the message.
10435 		 */
10436 		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
10437 		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
10438 
10439 		spin_lock_irq(&phba->hbalock);
10440 		if (phba->sli_rev < LPFC_SLI_REV4) {
10441 			if (abort_iotag != 0 &&
10442 				abort_iotag <= phba->sli.last_iotag)
10443 				abort_iocb =
10444 					phba->sli.iocbq_lookup[abort_iotag];
10445 		} else
10446 			/* For sli4 the abort_tag is the XRI,
10447 			 * so the abort routine puts the iotag  of the iocb
10448 			 * being aborted in the context field of the abort
10449 			 * IOCB.
10450 			 */
10451 			abort_iocb = phba->sli.iocbq_lookup[abort_context];
10452 
10453 		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
10454 				"0327 Cannot abort els iocb %p "
10455 				"with tag %x context %x, abort status %x, "
10456 				"abort code %x\n",
10457 				abort_iocb, abort_iotag, abort_context,
10458 				irsp->ulpStatus, irsp->un.ulpWord[4]);
10459 
10460 		spin_unlock_irq(&phba->hbalock);
10461 	}
10462 	lpfc_sli_release_iocbq(phba, cmdiocb);
10463 	return;
10464 }
10465 
10466 /**
10467  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
10468  * @phba: Pointer to HBA context object.
10469  * @cmdiocb: Pointer to driver command iocb object.
10470  * @rspiocb: Pointer to driver response iocb object.
10471  *
10472  * The function is called from SLI ring event handler with no
10473  * lock held. This function is the completion handler for ELS commands
10474  * which are aborted. The function frees memory resources used for
10475  * the aborted ELS commands.
10476  **/
10477 static void
10478 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10479 		     struct lpfc_iocbq *rspiocb)
10480 {
10481 	IOCB_t *irsp = &rspiocb->iocb;
10482 
10483 	/* ELS cmd tag <ulpIoTag> completes */
10484 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
10485 			"0139 Ignoring ELS cmd tag x%x completion Data: "
10486 			"x%x x%x x%x\n",
10487 			irsp->ulpIoTag, irsp->ulpStatus,
10488 			irsp->un.ulpWord[4], irsp->ulpTimeout);
10489 	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
10490 		lpfc_ct_free_iocb(phba, cmdiocb);
10491 	else
10492 		lpfc_els_free_iocb(phba, cmdiocb);
10493 	return;
10494 }
10495 
10496 /**
10497  * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
10498  * @phba: Pointer to HBA context object.
10499  * @pring: Pointer to driver SLI ring object.
10500  * @cmdiocb: Pointer to driver command iocb object.
10501  *
10502  * This function issues an abort iocb for the provided command iocb down to
10503  * the port. Other than the case the outstanding command iocb is an abort
10504  * request, this function issues abort out unconditionally. This function is
10505  * called with hbalock held. The function returns 0 when it fails due to
10506  * memory allocation failure or when the command iocb is an abort request.
10507  **/
10508 static int
10509 lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10510 			   struct lpfc_iocbq *cmdiocb)
10511 {
10512 	struct lpfc_vport *vport = cmdiocb->vport;
10513 	struct lpfc_iocbq *abtsiocbp;
10514 	IOCB_t *icmd = NULL;
10515 	IOCB_t *iabt = NULL;
10516 	int retval;
10517 	unsigned long iflags;
10518 
10519 	lockdep_assert_held(&phba->hbalock);
10520 
10521 	/*
10522 	 * There are certain command types we don't want to abort.  And we
10523 	 * don't want to abort commands that are already in the process of
10524 	 * being aborted.
10525 	 */
10526 	icmd = &cmdiocb->iocb;
10527 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10528 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10529 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10530 		return 0;
10531 
10532 	/* issue ABTS for this IOCB based on iotag */
10533 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
10534 	if (abtsiocbp == NULL)
10535 		return 0;
10536 
10537 	/* This signals the response to set the correct status
10538 	 * before calling the completion handler
10539 	 */
10540 	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10541 
10542 	iabt = &abtsiocbp->iocb;
10543 	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
10544 	iabt->un.acxri.abortContextTag = icmd->ulpContext;
10545 	if (phba->sli_rev == LPFC_SLI_REV4) {
10546 		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
10547 		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
10548 	}
10549 	else
10550 		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
10551 	iabt->ulpLe = 1;
10552 	iabt->ulpClass = icmd->ulpClass;
10553 
10554 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
10555 	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
10556 	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
10557 		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
10558 	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
10559 		abtsiocbp->iocb_flag |= LPFC_IO_FOF;
10560 
10561 	if (phba->link_state >= LPFC_LINK_UP)
10562 		iabt->ulpCommand = CMD_ABORT_XRI_CN;
10563 	else
10564 		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
10565 
10566 	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
10567 	abtsiocbp->vport = vport;
10568 
10569 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
10570 			 "0339 Abort xri x%x, original iotag x%x, "
10571 			 "abort cmd iotag x%x\n",
10572 			 iabt->un.acxri.abortIoTag,
10573 			 iabt->un.acxri.abortContextTag,
10574 			 abtsiocbp->iotag);
10575 
10576 	if (phba->sli_rev == LPFC_SLI_REV4) {
10577 		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
10578 		if (unlikely(pring == NULL))
10579 			return 0;
10580 		/* Note: both hbalock and ring_lock need to be set here */
10581 		spin_lock_irqsave(&pring->ring_lock, iflags);
10582 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10583 			abtsiocbp, 0);
10584 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
10585 	} else {
10586 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
10587 			abtsiocbp, 0);
10588 	}
10589 
10590 	if (retval)
10591 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
10592 
10593 	/*
10594 	 * Caller to this routine should check for IOCB_ERROR
10595 	 * and handle it properly.  This routine no longer removes
10596 	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
10597 	 */
10598 	return retval;
10599 }
10600 
10601 /**
10602  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
10603  * @phba: Pointer to HBA context object.
10604  * @pring: Pointer to driver SLI ring object.
10605  * @cmdiocb: Pointer to driver command iocb object.
10606  *
10607  * This function issues an abort iocb for the provided command iocb. In case
10608  * of unloading, the abort iocb will not be issued to commands on the ELS
10609  * ring. Instead, the callback function shall be changed to those commands
10610  * so that nothing happens when them finishes. This function is called with
10611  * hbalock held. The function returns 0 when the command iocb is an abort
10612  * request.
10613  **/
10614 int
10615 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10616 			   struct lpfc_iocbq *cmdiocb)
10617 {
10618 	struct lpfc_vport *vport = cmdiocb->vport;
10619 	int retval = IOCB_ERROR;
10620 	IOCB_t *icmd = NULL;
10621 
10622 	lockdep_assert_held(&phba->hbalock);
10623 
10624 	/*
10625 	 * There are certain command types we don't want to abort.  And we
10626 	 * don't want to abort commands that are already in the process of
10627 	 * being aborted.
10628 	 */
10629 	icmd = &cmdiocb->iocb;
10630 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
10631 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
10632 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10633 		return 0;
10634 
10635 	/*
10636 	 * If we're unloading, don't abort iocb on the ELS ring, but change
10637 	 * the callback so that nothing happens when it finishes.
10638 	 */
10639 	if ((vport->load_flag & FC_UNLOADING) &&
10640 	    (pring->ringno == LPFC_ELS_RING)) {
10641 		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
10642 			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
10643 		else
10644 			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
10645 		goto abort_iotag_exit;
10646 	}
10647 
10648 	/* Now, we try to issue the abort to the cmdiocb out */
10649 	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
10650 
10651 abort_iotag_exit:
10652 	/*
10653 	 * Caller to this routine should check for IOCB_ERROR
10654 	 * and handle it properly.  This routine no longer removes
10655 	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
10656 	 */
10657 	return retval;
10658 }
10659 
10660 /**
10661  * lpfc_sli4_abort_nvme_io - Issue abort for a command iocb
10662  * @phba: Pointer to HBA context object.
10663  * @pring: Pointer to driver SLI ring object.
10664  * @cmdiocb: Pointer to driver command iocb object.
10665  *
10666  * This function issues an abort iocb for the provided command iocb down to
10667  * the port. Other than the case the outstanding command iocb is an abort
10668  * request, this function issues abort out unconditionally. This function is
10669  * called with hbalock held. The function returns 0 when it fails due to
10670  * memory allocation failure or when the command iocb is an abort request.
10671  **/
10672 static int
10673 lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10674 			struct lpfc_iocbq *cmdiocb)
10675 {
10676 	struct lpfc_vport *vport = cmdiocb->vport;
10677 	struct lpfc_iocbq *abtsiocbp;
10678 	union lpfc_wqe *abts_wqe;
10679 	int retval;
10680 
10681 	/*
10682 	 * There are certain command types we don't want to abort.  And we
10683 	 * don't want to abort commands that are already in the process of
10684 	 * being aborted.
10685 	 */
10686 	if (cmdiocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
10687 	    cmdiocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
10688 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
10689 		return 0;
10690 
10691 	/* issue ABTS for this io based on iotag */
10692 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
10693 	if (abtsiocbp == NULL)
10694 		return 0;
10695 
10696 	/* This signals the response to set the correct status
10697 	 * before calling the completion handler
10698 	 */
10699 	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
10700 
10701 	/* Complete prepping the abort wqe and issue to the FW. */
10702 	abts_wqe = &abtsiocbp->wqe;
10703 	bf_set(abort_cmd_ia, &abts_wqe->abort_cmd, 0);
10704 	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
10705 
10706 	/* Explicitly set reserved fields to zero.*/
10707 	abts_wqe->abort_cmd.rsrvd4 = 0;
10708 	abts_wqe->abort_cmd.rsrvd5 = 0;
10709 
10710 	/* WQE Common - word 6.  Context is XRI tag.  Set 0. */
10711 	bf_set(wqe_xri_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10712 	bf_set(wqe_ctxt_tag, &abts_wqe->abort_cmd.wqe_com, 0);
10713 
10714 	/* word 7 */
10715 	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
10716 	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
10717 	bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
10718 	       cmdiocb->iocb.ulpClass);
10719 
10720 	/* word 8 - tell the FW to abort the IO associated with this
10721 	 * outstanding exchange ID.
10722 	 */
10723 	abts_wqe->abort_cmd.wqe_com.abort_tag = cmdiocb->sli4_xritag;
10724 
10725 	/* word 9 - this is the iotag for the abts_wqe completion. */
10726 	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
10727 	       abtsiocbp->iotag);
10728 
10729 	/* word 10 */
10730 	bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, cmdiocb->hba_wqidx);
10731 	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
10732 	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
10733 
10734 	/* word 11 */
10735 	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
10736 	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
10737 	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
10738 
10739 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
10740 	abtsiocbp->iocb_flag |= LPFC_IO_NVME;
10741 	abtsiocbp->vport = vport;
10742 	abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
10743 	retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
10744 	if (retval) {
10745 		lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10746 				 "6147 Failed abts issue_wqe with status x%x "
10747 				 "for oxid x%x\n",
10748 				 retval, cmdiocb->sli4_xritag);
10749 		lpfc_sli_release_iocbq(phba, abtsiocbp);
10750 		return retval;
10751 	}
10752 
10753 	lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
10754 			 "6148 Drv Abort NVME Request Issued for "
10755 			 "ox_id x%x on reqtag x%x\n",
10756 			 cmdiocb->sli4_xritag,
10757 			 abtsiocbp->iotag);
10758 
10759 	return retval;
10760 }
10761 
10762 /**
10763  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
10764  * @phba: pointer to lpfc HBA data structure.
10765  *
10766  * This routine will abort all pending and outstanding iocbs to an HBA.
10767  **/
10768 void
10769 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
10770 {
10771 	struct lpfc_sli *psli = &phba->sli;
10772 	struct lpfc_sli_ring *pring;
10773 	struct lpfc_queue *qp = NULL;
10774 	int i;
10775 
10776 	if (phba->sli_rev != LPFC_SLI_REV4) {
10777 		for (i = 0; i < psli->num_rings; i++) {
10778 			pring = &psli->sli3_ring[i];
10779 			lpfc_sli_abort_iocb_ring(phba, pring);
10780 		}
10781 		return;
10782 	}
10783 	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
10784 		pring = qp->pring;
10785 		if (!pring)
10786 			continue;
10787 		lpfc_sli_abort_iocb_ring(phba, pring);
10788 	}
10789 }
10790 
10791 /**
10792  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
10793  * @iocbq: Pointer to driver iocb object.
10794  * @vport: Pointer to driver virtual port object.
10795  * @tgt_id: SCSI ID of the target.
10796  * @lun_id: LUN ID of the scsi device.
10797  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
10798  *
10799  * This function acts as an iocb filter for functions which abort or count
10800  * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
10801  * 0 if the filtering criteria is met for the given iocb and will return
10802  * 1 if the filtering criteria is not met.
10803  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
10804  * given iocb is for the SCSI device specified by vport, tgt_id and
10805  * lun_id parameter.
10806  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
10807  * given iocb is for the SCSI target specified by vport and tgt_id
10808  * parameters.
10809  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
10810  * given iocb is for the SCSI host associated with the given vport.
10811  * This function is called with no locks held.
10812  **/
10813 static int
10814 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
10815 			   uint16_t tgt_id, uint64_t lun_id,
10816 			   lpfc_ctx_cmd ctx_cmd)
10817 {
10818 	struct lpfc_scsi_buf *lpfc_cmd;
10819 	int rc = 1;
10820 
10821 	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
10822 		return rc;
10823 
10824 	if (iocbq->vport != vport)
10825 		return rc;
10826 
10827 	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
10828 
10829 	if (lpfc_cmd->pCmd == NULL)
10830 		return rc;
10831 
10832 	switch (ctx_cmd) {
10833 	case LPFC_CTX_LUN:
10834 		if ((lpfc_cmd->rdata->pnode) &&
10835 		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
10836 		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
10837 			rc = 0;
10838 		break;
10839 	case LPFC_CTX_TGT:
10840 		if ((lpfc_cmd->rdata->pnode) &&
10841 		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
10842 			rc = 0;
10843 		break;
10844 	case LPFC_CTX_HOST:
10845 		rc = 0;
10846 		break;
10847 	default:
10848 		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
10849 			__func__, ctx_cmd);
10850 		break;
10851 	}
10852 
10853 	return rc;
10854 }
10855 
10856 /**
10857  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
10858  * @vport: Pointer to virtual port.
10859  * @tgt_id: SCSI ID of the target.
10860  * @lun_id: LUN ID of the scsi device.
10861  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10862  *
10863  * This function returns number of FCP commands pending for the vport.
10864  * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
10865  * commands pending on the vport associated with SCSI device specified
10866  * by tgt_id and lun_id parameters.
10867  * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
10868  * commands pending on the vport associated with SCSI target specified
10869  * by tgt_id parameter.
10870  * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
10871  * commands pending on the vport.
10872  * This function returns the number of iocbs which satisfy the filter.
10873  * This function is called without any lock held.
10874  **/
10875 int
10876 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
10877 		  lpfc_ctx_cmd ctx_cmd)
10878 {
10879 	struct lpfc_hba *phba = vport->phba;
10880 	struct lpfc_iocbq *iocbq;
10881 	int sum, i;
10882 
10883 	spin_lock_irq(&phba->hbalock);
10884 	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
10885 		iocbq = phba->sli.iocbq_lookup[i];
10886 
10887 		if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
10888 						ctx_cmd) == 0)
10889 			sum++;
10890 	}
10891 	spin_unlock_irq(&phba->hbalock);
10892 
10893 	return sum;
10894 }
10895 
10896 /**
10897  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
10898  * @phba: Pointer to HBA context object
10899  * @cmdiocb: Pointer to command iocb object.
10900  * @rspiocb: Pointer to response iocb object.
10901  *
10902  * This function is called when an aborted FCP iocb completes. This
10903  * function is called by the ring event handler with no lock held.
10904  * This function frees the iocb.
10905  **/
10906 void
10907 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
10908 			struct lpfc_iocbq *rspiocb)
10909 {
10910 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10911 			"3096 ABORT_XRI_CN completing on rpi x%x "
10912 			"original iotag x%x, abort cmd iotag x%x "
10913 			"status 0x%x, reason 0x%x\n",
10914 			cmdiocb->iocb.un.acxri.abortContextTag,
10915 			cmdiocb->iocb.un.acxri.abortIoTag,
10916 			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
10917 			rspiocb->iocb.un.ulpWord[4]);
10918 	lpfc_sli_release_iocbq(phba, cmdiocb);
10919 	return;
10920 }
10921 
10922 /**
10923  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
10924  * @vport: Pointer to virtual port.
10925  * @pring: Pointer to driver SLI ring object.
10926  * @tgt_id: SCSI ID of the target.
10927  * @lun_id: LUN ID of the scsi device.
10928  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
10929  *
10930  * This function sends an abort command for every SCSI command
10931  * associated with the given virtual port pending on the ring
10932  * filtered by lpfc_sli_validate_fcp_iocb function.
10933  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
10934  * FCP iocbs associated with lun specified by tgt_id and lun_id
10935  * parameters
10936  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
10937  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
10938  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
10939  * FCP iocbs associated with virtual port.
10940  * This function returns number of iocbs it failed to abort.
10941  * This function is called with no locks held.
10942  **/
10943 int
10944 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
10945 		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
10946 {
10947 	struct lpfc_hba *phba = vport->phba;
10948 	struct lpfc_iocbq *iocbq;
10949 	struct lpfc_iocbq *abtsiocb;
10950 	struct lpfc_sli_ring *pring_s4;
10951 	IOCB_t *cmd = NULL;
10952 	int errcnt = 0, ret_val = 0;
10953 	int i;
10954 
10955 	for (i = 1; i <= phba->sli.last_iotag; i++) {
10956 		iocbq = phba->sli.iocbq_lookup[i];
10957 
10958 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
10959 					       abort_cmd) != 0)
10960 			continue;
10961 
10962 		/*
10963 		 * If the iocbq is already being aborted, don't take a second
10964 		 * action, but do count it.
10965 		 */
10966 		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
10967 			continue;
10968 
10969 		/* issue ABTS for this IOCB based on iotag */
10970 		abtsiocb = lpfc_sli_get_iocbq(phba);
10971 		if (abtsiocb == NULL) {
10972 			errcnt++;
10973 			continue;
10974 		}
10975 
10976 		/* indicate the IO is being aborted by the driver. */
10977 		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
10978 
10979 		cmd = &iocbq->iocb;
10980 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
10981 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
10982 		if (phba->sli_rev == LPFC_SLI_REV4)
10983 			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
10984 		else
10985 			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
10986 		abtsiocb->iocb.ulpLe = 1;
10987 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
10988 		abtsiocb->vport = vport;
10989 
10990 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
10991 		abtsiocb->hba_wqidx = iocbq->hba_wqidx;
10992 		if (iocbq->iocb_flag & LPFC_IO_FCP)
10993 			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
10994 		if (iocbq->iocb_flag & LPFC_IO_FOF)
10995 			abtsiocb->iocb_flag |= LPFC_IO_FOF;
10996 
10997 		if (lpfc_is_link_up(phba))
10998 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
10999 		else
11000 			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11001 
11002 		/* Setup callback routine and issue the command. */
11003 		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11004 		if (phba->sli_rev == LPFC_SLI_REV4) {
11005 			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11006 			if (!pring_s4)
11007 				continue;
11008 			ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11009 						      abtsiocb, 0);
11010 		} else
11011 			ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
11012 						      abtsiocb, 0);
11013 		if (ret_val == IOCB_ERROR) {
11014 			lpfc_sli_release_iocbq(phba, abtsiocb);
11015 			errcnt++;
11016 			continue;
11017 		}
11018 	}
11019 
11020 	return errcnt;
11021 }
11022 
11023 /**
11024  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
11025  * @vport: Pointer to virtual port.
11026  * @pring: Pointer to driver SLI ring object.
11027  * @tgt_id: SCSI ID of the target.
11028  * @lun_id: LUN ID of the scsi device.
11029  * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
11030  *
11031  * This function sends an abort command for every SCSI command
11032  * associated with the given virtual port pending on the ring
11033  * filtered by lpfc_sli_validate_fcp_iocb function.
11034  * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
11035  * FCP iocbs associated with lun specified by tgt_id and lun_id
11036  * parameters
11037  * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
11038  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
11039  * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
11040  * FCP iocbs associated with virtual port.
11041  * This function returns number of iocbs it aborted .
11042  * This function is called with no locks held right after a taskmgmt
11043  * command is sent.
11044  **/
11045 int
11046 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
11047 			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
11048 {
11049 	struct lpfc_hba *phba = vport->phba;
11050 	struct lpfc_scsi_buf *lpfc_cmd;
11051 	struct lpfc_iocbq *abtsiocbq;
11052 	struct lpfc_nodelist *ndlp;
11053 	struct lpfc_iocbq *iocbq;
11054 	IOCB_t *icmd;
11055 	int sum, i, ret_val;
11056 	unsigned long iflags;
11057 	struct lpfc_sli_ring *pring_s4;
11058 
11059 	spin_lock_irq(&phba->hbalock);
11060 
11061 	/* all I/Os are in process of being flushed */
11062 	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
11063 		spin_unlock_irq(&phba->hbalock);
11064 		return 0;
11065 	}
11066 	sum = 0;
11067 
11068 	for (i = 1; i <= phba->sli.last_iotag; i++) {
11069 		iocbq = phba->sli.iocbq_lookup[i];
11070 
11071 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
11072 					       cmd) != 0)
11073 			continue;
11074 
11075 		/*
11076 		 * If the iocbq is already being aborted, don't take a second
11077 		 * action, but do count it.
11078 		 */
11079 		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
11080 			continue;
11081 
11082 		/* issue ABTS for this IOCB based on iotag */
11083 		abtsiocbq = __lpfc_sli_get_iocbq(phba);
11084 		if (abtsiocbq == NULL)
11085 			continue;
11086 
11087 		icmd = &iocbq->iocb;
11088 		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
11089 		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
11090 		if (phba->sli_rev == LPFC_SLI_REV4)
11091 			abtsiocbq->iocb.un.acxri.abortIoTag =
11092 							 iocbq->sli4_xritag;
11093 		else
11094 			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
11095 		abtsiocbq->iocb.ulpLe = 1;
11096 		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
11097 		abtsiocbq->vport = vport;
11098 
11099 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
11100 		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
11101 		if (iocbq->iocb_flag & LPFC_IO_FCP)
11102 			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
11103 		if (iocbq->iocb_flag & LPFC_IO_FOF)
11104 			abtsiocbq->iocb_flag |= LPFC_IO_FOF;
11105 
11106 		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
11107 		ndlp = lpfc_cmd->rdata->pnode;
11108 
11109 		if (lpfc_is_link_up(phba) &&
11110 		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
11111 			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
11112 		else
11113 			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
11114 
11115 		/* Setup callback routine and issue the command. */
11116 		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
11117 
11118 		/*
11119 		 * Indicate the IO is being aborted by the driver and set
11120 		 * the caller's flag into the aborted IO.
11121 		 */
11122 		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;
11123 
11124 		if (phba->sli_rev == LPFC_SLI_REV4) {
11125 			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
11126 			if (pring_s4 == NULL)
11127 				continue;
11128 			/* Note: both hbalock and ring_lock must be set here */
11129 			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
11130 			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
11131 							abtsiocbq, 0);
11132 			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
11133 		} else {
11134 			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
11135 							abtsiocbq, 0);
11136 		}
11137 
11138 
11139 		if (ret_val == IOCB_ERROR)
11140 			__lpfc_sli_release_iocbq(phba, abtsiocbq);
11141 		else
11142 			sum++;
11143 	}
11144 	spin_unlock_irq(&phba->hbalock);
11145 	return sum;
11146 }
11147 
11148 /**
11149  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
11150  * @phba: Pointer to HBA context object.
11151  * @cmdiocbq: Pointer to command iocb.
11152  * @rspiocbq: Pointer to response iocb.
11153  *
11154  * This function is the completion handler for iocbs issued using
11155  * lpfc_sli_issue_iocb_wait function. This function is called by the
11156  * ring event handler function without any lock held. This function
11157  * can be called from both worker thread context and interrupt
11158  * context. This function also can be called from other thread which
11159  * cleans up the SLI layer objects.
11160  * This function copy the contents of the response iocb to the
11161  * response iocb memory object provided by the caller of
11162  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
11163  * sleeps for the iocb completion.
11164  **/
11165 static void
11166 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
11167 			struct lpfc_iocbq *cmdiocbq,
11168 			struct lpfc_iocbq *rspiocbq)
11169 {
11170 	wait_queue_head_t *pdone_q;
11171 	unsigned long iflags;
11172 	struct lpfc_scsi_buf *lpfc_cmd;
11173 
11174 	spin_lock_irqsave(&phba->hbalock, iflags);
11175 	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
11176 
11177 		/*
11178 		 * A time out has occurred for the iocb.  If a time out
11179 		 * completion handler has been supplied, call it.  Otherwise,
11180 		 * just free the iocbq.
11181 		 */
11182 
11183 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11184 		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
11185 		cmdiocbq->wait_iocb_cmpl = NULL;
11186 		if (cmdiocbq->iocb_cmpl)
11187 			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
11188 		else
11189 			lpfc_sli_release_iocbq(phba, cmdiocbq);
11190 		return;
11191 	}
11192 
11193 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
11194 	if (cmdiocbq->context2 && rspiocbq)
11195 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
11196 		       &rspiocbq->iocb, sizeof(IOCB_t));
11197 
11198 	/* Set the exchange busy flag for task management commands */
11199 	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
11200 		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
11201 		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
11202 			cur_iocbq);
11203 		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
11204 	}
11205 
11206 	pdone_q = cmdiocbq->context_un.wait_queue;
11207 	if (pdone_q)
11208 		wake_up(pdone_q);
11209 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11210 	return;
11211 }
11212 
11213 /**
11214  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
11215  * @phba: Pointer to HBA context object..
11216  * @piocbq: Pointer to command iocb.
11217  * @flag: Flag to test.
11218  *
11219  * This routine grabs the hbalock and then test the iocb_flag to
11220  * see if the passed in flag is set.
11221  * Returns:
11222  * 1 if flag is set.
11223  * 0 if flag is not set.
11224  **/
11225 static int
11226 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
11227 		 struct lpfc_iocbq *piocbq, uint32_t flag)
11228 {
11229 	unsigned long iflags;
11230 	int ret;
11231 
11232 	spin_lock_irqsave(&phba->hbalock, iflags);
11233 	ret = piocbq->iocb_flag & flag;
11234 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11235 	return ret;
11236 
11237 }
11238 
11239 /**
11240  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
11241  * @phba: Pointer to HBA context object..
11242  * @pring: Pointer to sli ring.
11243  * @piocb: Pointer to command iocb.
11244  * @prspiocbq: Pointer to response iocb.
11245  * @timeout: Timeout in number of seconds.
11246  *
11247  * This function issues the iocb to firmware and waits for the
11248  * iocb to complete. The iocb_cmpl field of the shall be used
11249  * to handle iocbs which time out. If the field is NULL, the
11250  * function shall free the iocbq structure.  If more clean up is
11251  * needed, the caller is expected to provide a completion function
11252  * that will provide the needed clean up.  If the iocb command is
11253  * not completed within timeout seconds, the function will either
11254  * free the iocbq structure (if iocb_cmpl == NULL) or execute the
11255  * completion function set in the iocb_cmpl field and then return
11256  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
11257  * resources if this function returns IOCB_TIMEDOUT.
11258  * The function waits for the iocb completion using an
11259  * non-interruptible wait.
11260  * This function will sleep while waiting for iocb completion.
11261  * So, this function should not be called from any context which
11262  * does not allow sleeping. Due to the same reason, this function
11263  * cannot be called with interrupt disabled.
11264  * This function assumes that the iocb completions occur while
11265  * this function sleep. So, this function cannot be called from
11266  * the thread which process iocb completion for this ring.
11267  * This function clears the iocb_flag of the iocb object before
11268  * issuing the iocb and the iocb completion handler sets this
11269  * flag and wakes this thread when the iocb completes.
11270  * The contents of the response iocb will be copied to prspiocbq
11271  * by the completion handler when the command completes.
11272  * This function returns IOCB_SUCCESS when success.
11273  * This function is called with no lock held.
11274  **/
11275 int
11276 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
11277 			 uint32_t ring_number,
11278 			 struct lpfc_iocbq *piocb,
11279 			 struct lpfc_iocbq *prspiocbq,
11280 			 uint32_t timeout)
11281 {
11282 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11283 	long timeleft, timeout_req = 0;
11284 	int retval = IOCB_SUCCESS;
11285 	uint32_t creg_val;
11286 	struct lpfc_iocbq *iocb;
11287 	int txq_cnt = 0;
11288 	int txcmplq_cnt = 0;
11289 	struct lpfc_sli_ring *pring;
11290 	unsigned long iflags;
11291 	bool iocb_completed = true;
11292 
11293 	if (phba->sli_rev >= LPFC_SLI_REV4)
11294 		pring = lpfc_sli4_calc_ring(phba, piocb);
11295 	else
11296 		pring = &phba->sli.sli3_ring[ring_number];
11297 	/*
11298 	 * If the caller has provided a response iocbq buffer, then context2
11299 	 * is NULL or its an error.
11300 	 */
11301 	if (prspiocbq) {
11302 		if (piocb->context2)
11303 			return IOCB_ERROR;
11304 		piocb->context2 = prspiocbq;
11305 	}
11306 
11307 	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
11308 	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
11309 	piocb->context_un.wait_queue = &done_q;
11310 	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
11311 
11312 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11313 		if (lpfc_readl(phba->HCregaddr, &creg_val))
11314 			return IOCB_ERROR;
11315 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
11316 		writel(creg_val, phba->HCregaddr);
11317 		readl(phba->HCregaddr); /* flush */
11318 	}
11319 
11320 	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
11321 				     SLI_IOCB_RET_IOCB);
11322 	if (retval == IOCB_SUCCESS) {
11323 		timeout_req = msecs_to_jiffies(timeout * 1000);
11324 		timeleft = wait_event_timeout(done_q,
11325 				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
11326 				timeout_req);
11327 		spin_lock_irqsave(&phba->hbalock, iflags);
11328 		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
11329 
11330 			/*
11331 			 * IOCB timed out.  Inform the wake iocb wait
11332 			 * completion function and set local status
11333 			 */
11334 
11335 			iocb_completed = false;
11336 			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
11337 		}
11338 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11339 		if (iocb_completed) {
11340 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11341 					"0331 IOCB wake signaled\n");
11342 			/* Note: we are not indicating if the IOCB has a success
11343 			 * status or not - that's for the caller to check.
11344 			 * IOCB_SUCCESS means just that the command was sent and
11345 			 * completed. Not that it completed successfully.
11346 			 * */
11347 		} else if (timeleft == 0) {
11348 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11349 					"0338 IOCB wait timeout error - no "
11350 					"wake response Data x%x\n", timeout);
11351 			retval = IOCB_TIMEDOUT;
11352 		} else {
11353 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
11354 					"0330 IOCB wake NOT set, "
11355 					"Data x%x x%lx\n",
11356 					timeout, (timeleft / jiffies));
11357 			retval = IOCB_TIMEDOUT;
11358 		}
11359 	} else if (retval == IOCB_BUSY) {
11360 		if (phba->cfg_log_verbose & LOG_SLI) {
11361 			list_for_each_entry(iocb, &pring->txq, list) {
11362 				txq_cnt++;
11363 			}
11364 			list_for_each_entry(iocb, &pring->txcmplq, list) {
11365 				txcmplq_cnt++;
11366 			}
11367 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11368 				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
11369 				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
11370 		}
11371 		return retval;
11372 	} else {
11373 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11374 				"0332 IOCB wait issue failed, Data x%x\n",
11375 				retval);
11376 		retval = IOCB_ERROR;
11377 	}
11378 
11379 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
11380 		if (lpfc_readl(phba->HCregaddr, &creg_val))
11381 			return IOCB_ERROR;
11382 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
11383 		writel(creg_val, phba->HCregaddr);
11384 		readl(phba->HCregaddr); /* flush */
11385 	}
11386 
11387 	if (prspiocbq)
11388 		piocb->context2 = NULL;
11389 
11390 	piocb->context_un.wait_queue = NULL;
11391 	piocb->iocb_cmpl = NULL;
11392 	return retval;
11393 }
11394 
11395 /**
11396  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
11397  * @phba: Pointer to HBA context object.
11398  * @pmboxq: Pointer to driver mailbox object.
11399  * @timeout: Timeout in number of seconds.
11400  *
11401  * This function issues the mailbox to firmware and waits for the
11402  * mailbox command to complete. If the mailbox command is not
11403  * completed within timeout seconds, it returns MBX_TIMEOUT.
11404  * The function waits for the mailbox completion using an
11405  * interruptible wait. If the thread is woken up due to a
11406  * signal, MBX_TIMEOUT error is returned to the caller. Caller
11407  * should not free the mailbox resources, if this function returns
11408  * MBX_TIMEOUT.
11409  * This function will sleep while waiting for mailbox completion.
11410  * So, this function should not be called from any context which
11411  * does not allow sleeping. Due to the same reason, this function
11412  * cannot be called with interrupt disabled.
11413  * This function assumes that the mailbox completion occurs while
11414  * this function sleep. So, this function cannot be called from
11415  * the worker thread which processes mailbox completion.
11416  * This function is called in the context of HBA management
11417  * applications.
11418  * This function returns MBX_SUCCESS when successful.
11419  * This function is called with no lock held.
11420  **/
11421 int
11422 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
11423 			 uint32_t timeout)
11424 {
11425 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
11426 	MAILBOX_t *mb = NULL;
11427 	int retval;
11428 	unsigned long flag;
11429 
11430 	/* The caller might set context1 for extended buffer */
11431 	if (pmboxq->context1)
11432 		mb = (MAILBOX_t *)pmboxq->context1;
11433 
11434 	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
11435 	/* setup wake call as IOCB callback */
11436 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
11437 	/* setup context field to pass wait_queue pointer to wake function  */
11438 	pmboxq->context1 = &done_q;
11439 
11440 	/* now issue the command */
11441 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
11442 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
11443 		wait_event_interruptible_timeout(done_q,
11444 				pmboxq->mbox_flag & LPFC_MBX_WAKE,
11445 				msecs_to_jiffies(timeout * 1000));
11446 
11447 		spin_lock_irqsave(&phba->hbalock, flag);
11448 		/* restore the possible extended buffer for free resource */
11449 		pmboxq->context1 = (uint8_t *)mb;
11450 		/*
11451 		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
11452 		 * else do not free the resources.
11453 		 */
11454 		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
11455 			retval = MBX_SUCCESS;
11456 		} else {
11457 			retval = MBX_TIMEOUT;
11458 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
11459 		}
11460 		spin_unlock_irqrestore(&phba->hbalock, flag);
11461 	} else {
11462 		/* restore the possible extended buffer for free resource */
11463 		pmboxq->context1 = (uint8_t *)mb;
11464 	}
11465 
11466 	return retval;
11467 }
11468 
11469 /**
11470  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
11471  * @phba: Pointer to HBA context.
11472  *
11473  * This function is called to shutdown the driver's mailbox sub-system.
11474  * It first marks the mailbox sub-system is in a block state to prevent
11475  * the asynchronous mailbox command from issued off the pending mailbox
11476  * command queue. If the mailbox command sub-system shutdown is due to
11477  * HBA error conditions such as EEH or ERATT, this routine shall invoke
11478  * the mailbox sub-system flush routine to forcefully bring down the
11479  * mailbox sub-system. Otherwise, if it is due to normal condition (such
11480  * as with offline or HBA function reset), this routine will wait for the
11481  * outstanding mailbox command to complete before invoking the mailbox
11482  * sub-system flush routine to gracefully bring down mailbox sub-system.
11483  **/
11484 void
11485 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
11486 {
11487 	struct lpfc_sli *psli = &phba->sli;
11488 	unsigned long timeout;
11489 
11490 	if (mbx_action == LPFC_MBX_NO_WAIT) {
11491 		/* delay 100ms for port state */
11492 		msleep(100);
11493 		lpfc_sli_mbox_sys_flush(phba);
11494 		return;
11495 	}
11496 	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
11497 
11498 	spin_lock_irq(&phba->hbalock);
11499 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
11500 
11501 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
11502 		/* Determine how long we might wait for the active mailbox
11503 		 * command to be gracefully completed by firmware.
11504 		 */
11505 		if (phba->sli.mbox_active)
11506 			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
11507 						phba->sli.mbox_active) *
11508 						1000) + jiffies;
11509 		spin_unlock_irq(&phba->hbalock);
11510 
11511 		while (phba->sli.mbox_active) {
11512 			/* Check active mailbox complete status every 2ms */
11513 			msleep(2);
11514 			if (time_after(jiffies, timeout))
11515 				/* Timeout, let the mailbox flush routine to
11516 				 * forcefully release active mailbox command
11517 				 */
11518 				break;
11519 		}
11520 	} else
11521 		spin_unlock_irq(&phba->hbalock);
11522 
11523 	lpfc_sli_mbox_sys_flush(phba);
11524 }
11525 
11526 /**
11527  * lpfc_sli_eratt_read - read sli-3 error attention events
11528  * @phba: Pointer to HBA context.
11529  *
11530  * This function is called to read the SLI3 device error attention registers
11531  * for possible error attention events. The caller must hold the hostlock
11532  * with spin_lock_irq().
11533  *
11534  * This function returns 1 when there is Error Attention in the Host Attention
11535  * Register and returns 0 otherwise.
11536  **/
11537 static int
11538 lpfc_sli_eratt_read(struct lpfc_hba *phba)
11539 {
11540 	uint32_t ha_copy;
11541 
11542 	/* Read chip Host Attention (HA) register */
11543 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
11544 		goto unplug_err;
11545 
11546 	if (ha_copy & HA_ERATT) {
11547 		/* Read host status register to retrieve error event */
11548 		if (lpfc_sli_read_hs(phba))
11549 			goto unplug_err;
11550 
11551 		/* Check if there is a deferred error condition is active */
11552 		if ((HS_FFER1 & phba->work_hs) &&
11553 		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11554 		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
11555 			phba->hba_flag |= DEFER_ERATT;
11556 			/* Clear all interrupt enable conditions */
11557 			writel(0, phba->HCregaddr);
11558 			readl(phba->HCregaddr);
11559 		}
11560 
11561 		/* Set the driver HA work bitmap */
11562 		phba->work_ha |= HA_ERATT;
11563 		/* Indicate polling handles this ERATT */
11564 		phba->hba_flag |= HBA_ERATT_HANDLED;
11565 		return 1;
11566 	}
11567 	return 0;
11568 
11569 unplug_err:
11570 	/* Set the driver HS work bitmap */
11571 	phba->work_hs |= UNPLUG_ERR;
11572 	/* Set the driver HA work bitmap */
11573 	phba->work_ha |= HA_ERATT;
11574 	/* Indicate polling handles this ERATT */
11575 	phba->hba_flag |= HBA_ERATT_HANDLED;
11576 	return 1;
11577 }
11578 
11579 /**
11580  * lpfc_sli4_eratt_read - read sli-4 error attention events
11581  * @phba: Pointer to HBA context.
11582  *
11583  * This function is called to read the SLI4 device error attention registers
11584  * for possible error attention events. The caller must hold the hostlock
11585  * with spin_lock_irq().
11586  *
11587  * This function returns 1 when there is Error Attention in the Host Attention
11588  * Register and returns 0 otherwise.
11589  **/
11590 static int
11591 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
11592 {
11593 	uint32_t uerr_sta_hi, uerr_sta_lo;
11594 	uint32_t if_type, portsmphr;
11595 	struct lpfc_register portstat_reg;
11596 
11597 	/*
11598 	 * For now, use the SLI4 device internal unrecoverable error
11599 	 * registers for error attention. This can be changed later.
11600 	 */
11601 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11602 	switch (if_type) {
11603 	case LPFC_SLI_INTF_IF_TYPE_0:
11604 		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
11605 			&uerr_sta_lo) ||
11606 			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
11607 			&uerr_sta_hi)) {
11608 			phba->work_hs |= UNPLUG_ERR;
11609 			phba->work_ha |= HA_ERATT;
11610 			phba->hba_flag |= HBA_ERATT_HANDLED;
11611 			return 1;
11612 		}
11613 		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
11614 		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
11615 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11616 					"1423 HBA Unrecoverable error: "
11617 					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
11618 					"ue_mask_lo_reg=0x%x, "
11619 					"ue_mask_hi_reg=0x%x\n",
11620 					uerr_sta_lo, uerr_sta_hi,
11621 					phba->sli4_hba.ue_mask_lo,
11622 					phba->sli4_hba.ue_mask_hi);
11623 			phba->work_status[0] = uerr_sta_lo;
11624 			phba->work_status[1] = uerr_sta_hi;
11625 			phba->work_ha |= HA_ERATT;
11626 			phba->hba_flag |= HBA_ERATT_HANDLED;
11627 			return 1;
11628 		}
11629 		break;
11630 	case LPFC_SLI_INTF_IF_TYPE_2:
11631 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
11632 			&portstat_reg.word0) ||
11633 			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
11634 			&portsmphr)){
11635 			phba->work_hs |= UNPLUG_ERR;
11636 			phba->work_ha |= HA_ERATT;
11637 			phba->hba_flag |= HBA_ERATT_HANDLED;
11638 			return 1;
11639 		}
11640 		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
11641 			phba->work_status[0] =
11642 				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
11643 			phba->work_status[1] =
11644 				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
11645 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11646 					"2885 Port Status Event: "
11647 					"port status reg 0x%x, "
11648 					"port smphr reg 0x%x, "
11649 					"error 1=0x%x, error 2=0x%x\n",
11650 					portstat_reg.word0,
11651 					portsmphr,
11652 					phba->work_status[0],
11653 					phba->work_status[1]);
11654 			phba->work_ha |= HA_ERATT;
11655 			phba->hba_flag |= HBA_ERATT_HANDLED;
11656 			return 1;
11657 		}
11658 		break;
11659 	case LPFC_SLI_INTF_IF_TYPE_1:
11660 	default:
11661 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11662 				"2886 HBA Error Attention on unsupported "
11663 				"if type %d.", if_type);
11664 		return 1;
11665 	}
11666 
11667 	return 0;
11668 }
11669 
11670 /**
11671  * lpfc_sli_check_eratt - check error attention events
11672  * @phba: Pointer to HBA context.
11673  *
11674  * This function is called from timer soft interrupt context to check HBA's
11675  * error attention register bit for error attention events.
11676  *
11677  * This function returns 1 when there is Error Attention in the Host Attention
11678  * Register and returns 0 otherwise.
11679  **/
11680 int
11681 lpfc_sli_check_eratt(struct lpfc_hba *phba)
11682 {
11683 	uint32_t ha_copy;
11684 
11685 	/* If somebody is waiting to handle an eratt, don't process it
11686 	 * here. The brdkill function will do this.
11687 	 */
11688 	if (phba->link_flag & LS_IGNORE_ERATT)
11689 		return 0;
11690 
11691 	/* Check if interrupt handler handles this ERATT */
11692 	spin_lock_irq(&phba->hbalock);
11693 	if (phba->hba_flag & HBA_ERATT_HANDLED) {
11694 		/* Interrupt handler has handled ERATT */
11695 		spin_unlock_irq(&phba->hbalock);
11696 		return 0;
11697 	}
11698 
11699 	/*
11700 	 * If there is deferred error attention, do not check for error
11701 	 * attention
11702 	 */
11703 	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11704 		spin_unlock_irq(&phba->hbalock);
11705 		return 0;
11706 	}
11707 
11708 	/* If PCI channel is offline, don't process it */
11709 	if (unlikely(pci_channel_offline(phba->pcidev))) {
11710 		spin_unlock_irq(&phba->hbalock);
11711 		return 0;
11712 	}
11713 
11714 	switch (phba->sli_rev) {
11715 	case LPFC_SLI_REV2:
11716 	case LPFC_SLI_REV3:
11717 		/* Read chip Host Attention (HA) register */
11718 		ha_copy = lpfc_sli_eratt_read(phba);
11719 		break;
11720 	case LPFC_SLI_REV4:
11721 		/* Read device Uncoverable Error (UERR) registers */
11722 		ha_copy = lpfc_sli4_eratt_read(phba);
11723 		break;
11724 	default:
11725 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11726 				"0299 Invalid SLI revision (%d)\n",
11727 				phba->sli_rev);
11728 		ha_copy = 0;
11729 		break;
11730 	}
11731 	spin_unlock_irq(&phba->hbalock);
11732 
11733 	return ha_copy;
11734 }
11735 
11736 /**
11737  * lpfc_intr_state_check - Check device state for interrupt handling
11738  * @phba: Pointer to HBA context.
11739  *
11740  * This inline routine checks whether a device or its PCI slot is in a state
11741  * that the interrupt should be handled.
11742  *
11743  * This function returns 0 if the device or the PCI slot is in a state that
11744  * interrupt should be handled, otherwise -EIO.
11745  */
11746 static inline int
11747 lpfc_intr_state_check(struct lpfc_hba *phba)
11748 {
11749 	/* If the pci channel is offline, ignore all the interrupts */
11750 	if (unlikely(pci_channel_offline(phba->pcidev)))
11751 		return -EIO;
11752 
11753 	/* Update device level interrupt statistics */
11754 	phba->sli.slistat.sli_intr++;
11755 
11756 	/* Ignore all interrupts during initialization. */
11757 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
11758 		return -EIO;
11759 
11760 	return 0;
11761 }
11762 
11763 /**
11764  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
11765  * @irq: Interrupt number.
11766  * @dev_id: The device context pointer.
11767  *
11768  * This function is directly called from the PCI layer as an interrupt
11769  * service routine when device with SLI-3 interface spec is enabled with
11770  * MSI-X multi-message interrupt mode and there are slow-path events in
11771  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
11772  * interrupt mode, this function is called as part of the device-level
11773  * interrupt handler. When the PCI slot is in error recovery or the HBA
11774  * is undergoing initialization, the interrupt handler will not process
11775  * the interrupt. The link attention and ELS ring attention events are
11776  * handled by the worker thread. The interrupt handler signals the worker
11777  * thread and returns for these events. This function is called without
11778  * any lock held. It gets the hbalock to access and update SLI data
11779  * structures.
11780  *
11781  * This function returns IRQ_HANDLED when interrupt is handled else it
11782  * returns IRQ_NONE.
11783  **/
11784 irqreturn_t
11785 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
11786 {
11787 	struct lpfc_hba  *phba;
11788 	uint32_t ha_copy, hc_copy;
11789 	uint32_t work_ha_copy;
11790 	unsigned long status;
11791 	unsigned long iflag;
11792 	uint32_t control;
11793 
11794 	MAILBOX_t *mbox, *pmbox;
11795 	struct lpfc_vport *vport;
11796 	struct lpfc_nodelist *ndlp;
11797 	struct lpfc_dmabuf *mp;
11798 	LPFC_MBOXQ_t *pmb;
11799 	int rc;
11800 
11801 	/*
11802 	 * Get the driver's phba structure from the dev_id and
11803 	 * assume the HBA is not interrupting.
11804 	 */
11805 	phba = (struct lpfc_hba *)dev_id;
11806 
11807 	if (unlikely(!phba))
11808 		return IRQ_NONE;
11809 
11810 	/*
11811 	 * Stuff needs to be attented to when this function is invoked as an
11812 	 * individual interrupt handler in MSI-X multi-message interrupt mode
11813 	 */
11814 	if (phba->intr_type == MSIX) {
11815 		/* Check device state for handling interrupt */
11816 		if (lpfc_intr_state_check(phba))
11817 			return IRQ_NONE;
11818 		/* Need to read HA REG for slow-path events */
11819 		spin_lock_irqsave(&phba->hbalock, iflag);
11820 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
11821 			goto unplug_error;
11822 		/* If somebody is waiting to handle an eratt don't process it
11823 		 * here. The brdkill function will do this.
11824 		 */
11825 		if (phba->link_flag & LS_IGNORE_ERATT)
11826 			ha_copy &= ~HA_ERATT;
11827 		/* Check the need for handling ERATT in interrupt handler */
11828 		if (ha_copy & HA_ERATT) {
11829 			if (phba->hba_flag & HBA_ERATT_HANDLED)
11830 				/* ERATT polling has handled ERATT */
11831 				ha_copy &= ~HA_ERATT;
11832 			else
11833 				/* Indicate interrupt handler handles ERATT */
11834 				phba->hba_flag |= HBA_ERATT_HANDLED;
11835 		}
11836 
11837 		/*
11838 		 * If there is deferred error attention, do not check for any
11839 		 * interrupt.
11840 		 */
11841 		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
11842 			spin_unlock_irqrestore(&phba->hbalock, iflag);
11843 			return IRQ_NONE;
11844 		}
11845 
11846 		/* Clear up only attention source related to slow-path */
11847 		if (lpfc_readl(phba->HCregaddr, &hc_copy))
11848 			goto unplug_error;
11849 
11850 		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
11851 			HC_LAINT_ENA | HC_ERINT_ENA),
11852 			phba->HCregaddr);
11853 		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
11854 			phba->HAregaddr);
11855 		writel(hc_copy, phba->HCregaddr);
11856 		readl(phba->HAregaddr); /* flush */
11857 		spin_unlock_irqrestore(&phba->hbalock, iflag);
11858 	} else
11859 		ha_copy = phba->ha_copy;
11860 
11861 	work_ha_copy = ha_copy & phba->work_ha_mask;
11862 
11863 	if (work_ha_copy) {
11864 		if (work_ha_copy & HA_LATT) {
11865 			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
11866 				/*
11867 				 * Turn off Link Attention interrupts
11868 				 * until CLEAR_LA done
11869 				 */
11870 				spin_lock_irqsave(&phba->hbalock, iflag);
11871 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
11872 				if (lpfc_readl(phba->HCregaddr, &control))
11873 					goto unplug_error;
11874 				control &= ~HC_LAINT_ENA;
11875 				writel(control, phba->HCregaddr);
11876 				readl(phba->HCregaddr); /* flush */
11877 				spin_unlock_irqrestore(&phba->hbalock, iflag);
11878 			}
11879 			else
11880 				work_ha_copy &= ~HA_LATT;
11881 		}
11882 
11883 		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
11884 			/*
11885 			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
11886 			 * the only slow ring.
11887 			 */
11888 			status = (work_ha_copy &
11889 				(HA_RXMASK  << (4*LPFC_ELS_RING)));
11890 			status >>= (4*LPFC_ELS_RING);
11891 			if (status & HA_RXMASK) {
11892 				spin_lock_irqsave(&phba->hbalock, iflag);
11893 				if (lpfc_readl(phba->HCregaddr, &control))
11894 					goto unplug_error;
11895 
11896 				lpfc_debugfs_slow_ring_trc(phba,
11897 				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
11898 				control, status,
11899 				(uint32_t)phba->sli.slistat.sli_intr);
11900 
11901 				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
11902 					lpfc_debugfs_slow_ring_trc(phba,
11903 						"ISR Disable ring:"
11904 						"pwork:x%x hawork:x%x wait:x%x",
11905 						phba->work_ha, work_ha_copy,
11906 						(uint32_t)((unsigned long)
11907 						&phba->work_waitq));
11908 
11909 					control &=
11910 					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
11911 					writel(control, phba->HCregaddr);
11912 					readl(phba->HCregaddr); /* flush */
11913 				}
11914 				else {
11915 					lpfc_debugfs_slow_ring_trc(phba,
11916 						"ISR slow ring:   pwork:"
11917 						"x%x hawork:x%x wait:x%x",
11918 						phba->work_ha, work_ha_copy,
11919 						(uint32_t)((unsigned long)
11920 						&phba->work_waitq));
11921 				}
11922 				spin_unlock_irqrestore(&phba->hbalock, iflag);
11923 			}
11924 		}
11925 		spin_lock_irqsave(&phba->hbalock, iflag);
11926 		if (work_ha_copy & HA_ERATT) {
11927 			if (lpfc_sli_read_hs(phba))
11928 				goto unplug_error;
11929 			/*
11930 			 * Check if there is a deferred error condition
11931 			 * is active
11932 			 */
11933 			if ((HS_FFER1 & phba->work_hs) &&
11934 				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
11935 				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
11936 				  phba->work_hs)) {
11937 				phba->hba_flag |= DEFER_ERATT;
11938 				/* Clear all interrupt enable conditions */
11939 				writel(0, phba->HCregaddr);
11940 				readl(phba->HCregaddr);
11941 			}
11942 		}
11943 
11944 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
11945 			pmb = phba->sli.mbox_active;
11946 			pmbox = &pmb->u.mb;
11947 			mbox = phba->mbox;
11948 			vport = pmb->vport;
11949 
11950 			/* First check out the status word */
11951 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
11952 			if (pmbox->mbxOwner != OWN_HOST) {
11953 				spin_unlock_irqrestore(&phba->hbalock, iflag);
11954 				/*
11955 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
11956 				 * mbxStatus <status>
11957 				 */
11958 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
11959 						LOG_SLI,
11960 						"(%d):0304 Stray Mailbox "
11961 						"Interrupt mbxCommand x%x "
11962 						"mbxStatus x%x\n",
11963 						(vport ? vport->vpi : 0),
11964 						pmbox->mbxCommand,
11965 						pmbox->mbxStatus);
11966 				/* clear mailbox attention bit */
11967 				work_ha_copy &= ~HA_MBATT;
11968 			} else {
11969 				phba->sli.mbox_active = NULL;
11970 				spin_unlock_irqrestore(&phba->hbalock, iflag);
11971 				phba->last_completion_time = jiffies;
11972 				del_timer(&phba->sli.mbox_tmo);
11973 				if (pmb->mbox_cmpl) {
11974 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
11975 							MAILBOX_CMD_SIZE);
11976 					if (pmb->out_ext_byte_len &&
11977 						pmb->context2)
11978 						lpfc_sli_pcimem_bcopy(
11979 						phba->mbox_ext,
11980 						pmb->context2,
11981 						pmb->out_ext_byte_len);
11982 				}
11983 				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
11984 					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
11985 
11986 					lpfc_debugfs_disc_trc(vport,
11987 						LPFC_DISC_TRC_MBOX_VPORT,
11988 						"MBOX dflt rpi: : "
11989 						"status:x%x rpi:x%x",
11990 						(uint32_t)pmbox->mbxStatus,
11991 						pmbox->un.varWords[0], 0);
11992 
11993 					if (!pmbox->mbxStatus) {
11994 						mp = (struct lpfc_dmabuf *)
11995 							(pmb->context1);
11996 						ndlp = (struct lpfc_nodelist *)
11997 							pmb->context2;
11998 
11999 						/* Reg_LOGIN of dflt RPI was
12000 						 * successful. new lets get
12001 						 * rid of the RPI using the
12002 						 * same mbox buffer.
12003 						 */
12004 						lpfc_unreg_login(phba,
12005 							vport->vpi,
12006 							pmbox->un.varWords[0],
12007 							pmb);
12008 						pmb->mbox_cmpl =
12009 							lpfc_mbx_cmpl_dflt_rpi;
12010 						pmb->context1 = mp;
12011 						pmb->context2 = ndlp;
12012 						pmb->vport = vport;
12013 						rc = lpfc_sli_issue_mbox(phba,
12014 								pmb,
12015 								MBX_NOWAIT);
12016 						if (rc != MBX_BUSY)
12017 							lpfc_printf_log(phba,
12018 							KERN_ERR,
12019 							LOG_MBOX | LOG_SLI,
12020 							"0350 rc should have"
12021 							"been MBX_BUSY\n");
12022 						if (rc != MBX_NOT_FINISHED)
12023 							goto send_current_mbox;
12024 					}
12025 				}
12026 				spin_lock_irqsave(
12027 						&phba->pport->work_port_lock,
12028 						iflag);
12029 				phba->pport->work_port_events &=
12030 					~WORKER_MBOX_TMO;
12031 				spin_unlock_irqrestore(
12032 						&phba->pport->work_port_lock,
12033 						iflag);
12034 				lpfc_mbox_cmpl_put(phba, pmb);
12035 			}
12036 		} else
12037 			spin_unlock_irqrestore(&phba->hbalock, iflag);
12038 
12039 		if ((work_ha_copy & HA_MBATT) &&
12040 		    (phba->sli.mbox_active == NULL)) {
12041 send_current_mbox:
12042 			/* Process next mailbox command if there is one */
12043 			do {
12044 				rc = lpfc_sli_issue_mbox(phba, NULL,
12045 							 MBX_NOWAIT);
12046 			} while (rc == MBX_NOT_FINISHED);
12047 			if (rc != MBX_SUCCESS)
12048 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12049 						LOG_SLI, "0349 rc should be "
12050 						"MBX_SUCCESS\n");
12051 		}
12052 
12053 		spin_lock_irqsave(&phba->hbalock, iflag);
12054 		phba->work_ha |= work_ha_copy;
12055 		spin_unlock_irqrestore(&phba->hbalock, iflag);
12056 		lpfc_worker_wake_up(phba);
12057 	}
12058 	return IRQ_HANDLED;
12059 unplug_error:
12060 	spin_unlock_irqrestore(&phba->hbalock, iflag);
12061 	return IRQ_HANDLED;
12062 
12063 } /* lpfc_sli_sp_intr_handler */
12064 
12065 /**
12066  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
12067  * @irq: Interrupt number.
12068  * @dev_id: The device context pointer.
12069  *
12070  * This function is directly called from the PCI layer as an interrupt
12071  * service routine when device with SLI-3 interface spec is enabled with
12072  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
12073  * ring event in the HBA. However, when the device is enabled with either
12074  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
12075  * device-level interrupt handler. When the PCI slot is in error recovery
12076  * or the HBA is undergoing initialization, the interrupt handler will not
12077  * process the interrupt. The SCSI FCP fast-path ring event are handled in
12078  * the intrrupt context. This function is called without any lock held.
12079  * It gets the hbalock to access and update SLI data structures.
12080  *
12081  * This function returns IRQ_HANDLED when interrupt is handled else it
12082  * returns IRQ_NONE.
12083  **/
12084 irqreturn_t
12085 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
12086 {
12087 	struct lpfc_hba  *phba;
12088 	uint32_t ha_copy;
12089 	unsigned long status;
12090 	unsigned long iflag;
12091 	struct lpfc_sli_ring *pring;
12092 
12093 	/* Get the driver's phba structure from the dev_id and
12094 	 * assume the HBA is not interrupting.
12095 	 */
12096 	phba = (struct lpfc_hba *) dev_id;
12097 
12098 	if (unlikely(!phba))
12099 		return IRQ_NONE;
12100 
12101 	/*
12102 	 * Stuff needs to be attented to when this function is invoked as an
12103 	 * individual interrupt handler in MSI-X multi-message interrupt mode
12104 	 */
12105 	if (phba->intr_type == MSIX) {
12106 		/* Check device state for handling interrupt */
12107 		if (lpfc_intr_state_check(phba))
12108 			return IRQ_NONE;
12109 		/* Need to read HA REG for FCP ring and other ring events */
12110 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
12111 			return IRQ_HANDLED;
12112 		/* Clear up only attention source related to fast-path */
12113 		spin_lock_irqsave(&phba->hbalock, iflag);
12114 		/*
12115 		 * If there is deferred error attention, do not check for
12116 		 * any interrupt.
12117 		 */
12118 		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12119 			spin_unlock_irqrestore(&phba->hbalock, iflag);
12120 			return IRQ_NONE;
12121 		}
12122 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
12123 			phba->HAregaddr);
12124 		readl(phba->HAregaddr); /* flush */
12125 		spin_unlock_irqrestore(&phba->hbalock, iflag);
12126 	} else
12127 		ha_copy = phba->ha_copy;
12128 
12129 	/*
12130 	 * Process all events on FCP ring. Take the optimized path for FCP IO.
12131 	 */
12132 	ha_copy &= ~(phba->work_ha_mask);
12133 
12134 	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12135 	status >>= (4*LPFC_FCP_RING);
12136 	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12137 	if (status & HA_RXMASK)
12138 		lpfc_sli_handle_fast_ring_event(phba, pring, status);
12139 
12140 	if (phba->cfg_multi_ring_support == 2) {
12141 		/*
12142 		 * Process all events on extra ring. Take the optimized path
12143 		 * for extra ring IO.
12144 		 */
12145 		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12146 		status >>= (4*LPFC_EXTRA_RING);
12147 		if (status & HA_RXMASK) {
12148 			lpfc_sli_handle_fast_ring_event(phba,
12149 					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
12150 					status);
12151 		}
12152 	}
12153 	return IRQ_HANDLED;
12154 }  /* lpfc_sli_fp_intr_handler */
12155 
12156 /**
12157  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
12158  * @irq: Interrupt number.
12159  * @dev_id: The device context pointer.
12160  *
12161  * This function is the HBA device-level interrupt handler to device with
12162  * SLI-3 interface spec, called from the PCI layer when either MSI or
12163  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
12164  * requires driver attention. This function invokes the slow-path interrupt
12165  * attention handling function and fast-path interrupt attention handling
12166  * function in turn to process the relevant HBA attention events. This
12167  * function is called without any lock held. It gets the hbalock to access
12168  * and update SLI data structures.
12169  *
12170  * This function returns IRQ_HANDLED when interrupt is handled, else it
12171  * returns IRQ_NONE.
12172  **/
12173 irqreturn_t
12174 lpfc_sli_intr_handler(int irq, void *dev_id)
12175 {
12176 	struct lpfc_hba  *phba;
12177 	irqreturn_t sp_irq_rc, fp_irq_rc;
12178 	unsigned long status1, status2;
12179 	uint32_t hc_copy;
12180 
12181 	/*
12182 	 * Get the driver's phba structure from the dev_id and
12183 	 * assume the HBA is not interrupting.
12184 	 */
12185 	phba = (struct lpfc_hba *) dev_id;
12186 
12187 	if (unlikely(!phba))
12188 		return IRQ_NONE;
12189 
12190 	/* Check device state for handling interrupt */
12191 	if (lpfc_intr_state_check(phba))
12192 		return IRQ_NONE;
12193 
12194 	spin_lock(&phba->hbalock);
12195 	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
12196 		spin_unlock(&phba->hbalock);
12197 		return IRQ_HANDLED;
12198 	}
12199 
12200 	if (unlikely(!phba->ha_copy)) {
12201 		spin_unlock(&phba->hbalock);
12202 		return IRQ_NONE;
12203 	} else if (phba->ha_copy & HA_ERATT) {
12204 		if (phba->hba_flag & HBA_ERATT_HANDLED)
12205 			/* ERATT polling has handled ERATT */
12206 			phba->ha_copy &= ~HA_ERATT;
12207 		else
12208 			/* Indicate interrupt handler handles ERATT */
12209 			phba->hba_flag |= HBA_ERATT_HANDLED;
12210 	}
12211 
12212 	/*
12213 	 * If there is deferred error attention, do not check for any interrupt.
12214 	 */
12215 	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
12216 		spin_unlock(&phba->hbalock);
12217 		return IRQ_NONE;
12218 	}
12219 
12220 	/* Clear attention sources except link and error attentions */
12221 	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
12222 		spin_unlock(&phba->hbalock);
12223 		return IRQ_HANDLED;
12224 	}
12225 	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
12226 		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
12227 		phba->HCregaddr);
12228 	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
12229 	writel(hc_copy, phba->HCregaddr);
12230 	readl(phba->HAregaddr); /* flush */
12231 	spin_unlock(&phba->hbalock);
12232 
12233 	/*
12234 	 * Invokes slow-path host attention interrupt handling as appropriate.
12235 	 */
12236 
12237 	/* status of events with mailbox and link attention */
12238 	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
12239 
12240 	/* status of events with ELS ring */
12241 	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
12242 	status2 >>= (4*LPFC_ELS_RING);
12243 
12244 	if (status1 || (status2 & HA_RXMASK))
12245 		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
12246 	else
12247 		sp_irq_rc = IRQ_NONE;
12248 
12249 	/*
12250 	 * Invoke fast-path host attention interrupt handling as appropriate.
12251 	 */
12252 
12253 	/* status of events with FCP ring */
12254 	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
12255 	status1 >>= (4*LPFC_FCP_RING);
12256 
12257 	/* status of events with extra ring */
12258 	if (phba->cfg_multi_ring_support == 2) {
12259 		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
12260 		status2 >>= (4*LPFC_EXTRA_RING);
12261 	} else
12262 		status2 = 0;
12263 
12264 	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
12265 		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
12266 	else
12267 		fp_irq_rc = IRQ_NONE;
12268 
12269 	/* Return device-level interrupt handling status */
12270 	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
12271 }  /* lpfc_sli_intr_handler */
12272 
12273 /**
12274  * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
12275  * @phba: pointer to lpfc hba data structure.
12276  *
12277  * This routine is invoked by the worker thread to process all the pending
12278  * SLI4 FCP abort XRI events.
12279  **/
12280 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
12281 {
12282 	struct lpfc_cq_event *cq_event;
12283 
12284 	/* First, declare the fcp xri abort event has been handled */
12285 	spin_lock_irq(&phba->hbalock);
12286 	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
12287 	spin_unlock_irq(&phba->hbalock);
12288 	/* Now, handle all the fcp xri abort events */
12289 	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
12290 		/* Get the first event from the head of the event queue */
12291 		spin_lock_irq(&phba->hbalock);
12292 		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
12293 				 cq_event, struct lpfc_cq_event, list);
12294 		spin_unlock_irq(&phba->hbalock);
12295 		/* Notify aborted XRI for FCP work queue */
12296 		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12297 		/* Free the event processed back to the free pool */
12298 		lpfc_sli4_cq_event_release(phba, cq_event);
12299 	}
12300 }
12301 
12302 /**
12303  * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
12304  * @phba: pointer to lpfc hba data structure.
12305  *
12306  * This routine is invoked by the worker thread to process all the pending
12307  * SLI4 NVME abort XRI events.
12308  **/
12309 void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
12310 {
12311 	struct lpfc_cq_event *cq_event;
12312 
12313 	/* First, declare the fcp xri abort event has been handled */
12314 	spin_lock_irq(&phba->hbalock);
12315 	phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
12316 	spin_unlock_irq(&phba->hbalock);
12317 	/* Now, handle all the fcp xri abort events */
12318 	while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
12319 		/* Get the first event from the head of the event queue */
12320 		spin_lock_irq(&phba->hbalock);
12321 		list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
12322 				 cq_event, struct lpfc_cq_event, list);
12323 		spin_unlock_irq(&phba->hbalock);
12324 		/* Notify aborted XRI for NVME work queue */
12325 		if (phba->nvmet_support) {
12326 			lpfc_sli4_nvmet_xri_aborted(phba,
12327 						    &cq_event->cqe.wcqe_axri);
12328 		} else {
12329 			lpfc_sli4_nvme_xri_aborted(phba,
12330 						   &cq_event->cqe.wcqe_axri);
12331 		}
12332 		/* Free the event processed back to the free pool */
12333 		lpfc_sli4_cq_event_release(phba, cq_event);
12334 	}
12335 }
12336 
12337 /**
12338  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
12339  * @phba: pointer to lpfc hba data structure.
12340  *
12341  * This routine is invoked by the worker thread to process all the pending
12342  * SLI4 els abort xri events.
12343  **/
12344 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
12345 {
12346 	struct lpfc_cq_event *cq_event;
12347 
12348 	/* First, declare the els xri abort event has been handled */
12349 	spin_lock_irq(&phba->hbalock);
12350 	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
12351 	spin_unlock_irq(&phba->hbalock);
12352 	/* Now, handle all the els xri abort events */
12353 	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
12354 		/* Get the first event from the head of the event queue */
12355 		spin_lock_irq(&phba->hbalock);
12356 		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
12357 				 cq_event, struct lpfc_cq_event, list);
12358 		spin_unlock_irq(&phba->hbalock);
12359 		/* Notify aborted XRI for ELS work queue */
12360 		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
12361 		/* Free the event processed back to the free pool */
12362 		lpfc_sli4_cq_event_release(phba, cq_event);
12363 	}
12364 }
12365 
12366 /**
12367  * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
12368  * @phba: pointer to lpfc hba data structure
12369  * @pIocbIn: pointer to the rspiocbq
12370  * @pIocbOut: pointer to the cmdiocbq
12371  * @wcqe: pointer to the complete wcqe
12372  *
12373  * This routine transfers the fields of a command iocbq to a response iocbq
12374  * by copying all the IOCB fields from command iocbq and transferring the
12375  * completion status information from the complete wcqe.
12376  **/
12377 static void
12378 lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
12379 			      struct lpfc_iocbq *pIocbIn,
12380 			      struct lpfc_iocbq *pIocbOut,
12381 			      struct lpfc_wcqe_complete *wcqe)
12382 {
12383 	int numBdes, i;
12384 	unsigned long iflags;
12385 	uint32_t status, max_response;
12386 	struct lpfc_dmabuf *dmabuf;
12387 	struct ulp_bde64 *bpl, bde;
12388 	size_t offset = offsetof(struct lpfc_iocbq, iocb);
12389 
12390 	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
12391 	       sizeof(struct lpfc_iocbq) - offset);
12392 	/* Map WCQE parameters into irspiocb parameters */
12393 	status = bf_get(lpfc_wcqe_c_status, wcqe);
12394 	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
12395 	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
12396 		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
12397 			pIocbIn->iocb.un.fcpi.fcpi_parm =
12398 					pIocbOut->iocb.un.fcpi.fcpi_parm -
12399 					wcqe->total_data_placed;
12400 		else
12401 			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12402 	else {
12403 		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
12404 		switch (pIocbOut->iocb.ulpCommand) {
12405 		case CMD_ELS_REQUEST64_CR:
12406 			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12407 			bpl  = (struct ulp_bde64 *)dmabuf->virt;
12408 			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
12409 			max_response = bde.tus.f.bdeSize;
12410 			break;
12411 		case CMD_GEN_REQUEST64_CR:
12412 			max_response = 0;
12413 			if (!pIocbOut->context3)
12414 				break;
12415 			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
12416 					sizeof(struct ulp_bde64);
12417 			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
12418 			bpl = (struct ulp_bde64 *)dmabuf->virt;
12419 			for (i = 0; i < numBdes; i++) {
12420 				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
12421 				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
12422 					max_response += bde.tus.f.bdeSize;
12423 			}
12424 			break;
12425 		default:
12426 			max_response = wcqe->total_data_placed;
12427 			break;
12428 		}
12429 		if (max_response < wcqe->total_data_placed)
12430 			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
12431 		else
12432 			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
12433 				wcqe->total_data_placed;
12434 	}
12435 
12436 	/* Convert BG errors for completion status */
12437 	if (status == CQE_STATUS_DI_ERROR) {
12438 		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
12439 
12440 		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
12441 			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
12442 		else
12443 			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;
12444 
12445 		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
12446 		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
12447 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12448 				BGS_GUARD_ERR_MASK;
12449 		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
12450 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12451 				BGS_APPTAG_ERR_MASK;
12452 		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
12453 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12454 				BGS_REFTAG_ERR_MASK;
12455 
12456 		/* Check to see if there was any good data before the error */
12457 		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
12458 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12459 				BGS_HI_WATER_MARK_PRESENT_MASK;
12460 			pIocbIn->iocb.unsli3.sli3_bg.bghm =
12461 				wcqe->total_data_placed;
12462 		}
12463 
12464 		/*
12465 		* Set ALL the error bits to indicate we don't know what
12466 		* type of error it is.
12467 		*/
12468 		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
12469 			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
12470 				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
12471 				BGS_GUARD_ERR_MASK);
12472 	}
12473 
12474 	/* Pick up HBA exchange busy condition */
12475 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
12476 		spin_lock_irqsave(&phba->hbalock, iflags);
12477 		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
12478 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12479 	}
12480 }
12481 
12482 /**
12483  * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
12484  * @phba: Pointer to HBA context object.
12485  * @wcqe: Pointer to work-queue completion queue entry.
12486  *
12487  * This routine handles an ELS work-queue completion event and construct
12488  * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common
12489  * discovery engine to handle.
12490  *
12491  * Return: Pointer to the receive IOCBQ, NULL otherwise.
12492  **/
12493 static struct lpfc_iocbq *
12494 lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
12495 			       struct lpfc_iocbq *irspiocbq)
12496 {
12497 	struct lpfc_sli_ring *pring;
12498 	struct lpfc_iocbq *cmdiocbq;
12499 	struct lpfc_wcqe_complete *wcqe;
12500 	unsigned long iflags;
12501 
12502 	pring = lpfc_phba_elsring(phba);
12503 
12504 	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
12505 	spin_lock_irqsave(&pring->ring_lock, iflags);
12506 	pring->stats.iocb_event++;
12507 	/* Look up the ELS command IOCB and create pseudo response IOCB */
12508 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
12509 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
12510 	/* Put the iocb back on the txcmplq */
12511 	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
12512 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
12513 
12514 	if (unlikely(!cmdiocbq)) {
12515 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12516 				"0386 ELS complete with no corresponding "
12517 				"cmdiocb: iotag (%d)\n",
12518 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
12519 		lpfc_sli_release_iocbq(phba, irspiocbq);
12520 		return NULL;
12521 	}
12522 
12523 	/* Fake the irspiocbq and copy necessary response information */
12524 	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);
12525 
12526 	return irspiocbq;
12527 }
12528 
12529 /**
12530  * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event
12531  * @phba: Pointer to HBA context object.
12532  * @cqe: Pointer to mailbox completion queue entry.
12533  *
12534  * This routine process a mailbox completion queue entry with asynchrous
12535  * event.
12536  *
12537  * Return: true if work posted to worker thread, otherwise false.
12538  **/
12539 static bool
12540 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12541 {
12542 	struct lpfc_cq_event *cq_event;
12543 	unsigned long iflags;
12544 
12545 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12546 			"0392 Async Event: word0:x%x, word1:x%x, "
12547 			"word2:x%x, word3:x%x\n", mcqe->word0,
12548 			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
12549 
12550 	/* Allocate a new internal CQ_EVENT entry */
12551 	cq_event = lpfc_sli4_cq_event_alloc(phba);
12552 	if (!cq_event) {
12553 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12554 				"0394 Failed to allocate CQ_EVENT entry\n");
12555 		return false;
12556 	}
12557 
12558 	/* Move the CQE into an asynchronous event entry */
12559 	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
12560 	spin_lock_irqsave(&phba->hbalock, iflags);
12561 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
12562 	/* Set the async event flag */
12563 	phba->hba_flag |= ASYNC_EVENT;
12564 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12565 
12566 	return true;
12567 }
12568 
12569 /**
12570  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
12571  * @phba: Pointer to HBA context object.
12572  * @cqe: Pointer to mailbox completion queue entry.
12573  *
12574  * This routine process a mailbox completion queue entry with mailbox
12575  * completion event.
12576  *
12577  * Return: true if work posted to worker thread, otherwise false.
12578  **/
12579 static bool
12580 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
12581 {
12582 	uint32_t mcqe_status;
12583 	MAILBOX_t *mbox, *pmbox;
12584 	struct lpfc_mqe *mqe;
12585 	struct lpfc_vport *vport;
12586 	struct lpfc_nodelist *ndlp;
12587 	struct lpfc_dmabuf *mp;
12588 	unsigned long iflags;
12589 	LPFC_MBOXQ_t *pmb;
12590 	bool workposted = false;
12591 	int rc;
12592 
12593 	/* If not a mailbox complete MCQE, out by checking mailbox consume */
12594 	if (!bf_get(lpfc_trailer_completed, mcqe))
12595 		goto out_no_mqe_complete;
12596 
12597 	/* Get the reference to the active mbox command */
12598 	spin_lock_irqsave(&phba->hbalock, iflags);
12599 	pmb = phba->sli.mbox_active;
12600 	if (unlikely(!pmb)) {
12601 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
12602 				"1832 No pending MBOX command to handle\n");
12603 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12604 		goto out_no_mqe_complete;
12605 	}
12606 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12607 	mqe = &pmb->u.mqe;
12608 	pmbox = (MAILBOX_t *)&pmb->u.mqe;
12609 	mbox = phba->mbox;
12610 	vport = pmb->vport;
12611 
12612 	/* Reset heartbeat timer */
12613 	phba->last_completion_time = jiffies;
12614 	del_timer(&phba->sli.mbox_tmo);
12615 
12616 	/* Move mbox data to caller's mailbox region, do endian swapping */
12617 	if (pmb->mbox_cmpl && mbox)
12618 		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
12619 
12620 	/*
12621 	 * For mcqe errors, conditionally move a modified error code to
12622 	 * the mbox so that the error will not be missed.
12623 	 */
12624 	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
12625 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
12626 		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
12627 			bf_set(lpfc_mqe_status, mqe,
12628 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
12629 	}
12630 	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
12631 		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
12632 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
12633 				      "MBOX dflt rpi: status:x%x rpi:x%x",
12634 				      mcqe_status,
12635 				      pmbox->un.varWords[0], 0);
12636 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
12637 			mp = (struct lpfc_dmabuf *)(pmb->context1);
12638 			ndlp = (struct lpfc_nodelist *)pmb->context2;
12639 			/* Reg_LOGIN of dflt RPI was successful. Now lets get
12640 			 * RID of the PPI using the same mbox buffer.
12641 			 */
12642 			lpfc_unreg_login(phba, vport->vpi,
12643 					 pmbox->un.varWords[0], pmb);
12644 			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
12645 			pmb->context1 = mp;
12646 			pmb->context2 = ndlp;
12647 			pmb->vport = vport;
12648 			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
12649 			if (rc != MBX_BUSY)
12650 				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
12651 						LOG_SLI, "0385 rc should "
12652 						"have been MBX_BUSY\n");
12653 			if (rc != MBX_NOT_FINISHED)
12654 				goto send_current_mbox;
12655 		}
12656 	}
12657 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
12658 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12659 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
12660 
12661 	/* There is mailbox completion work to do */
12662 	spin_lock_irqsave(&phba->hbalock, iflags);
12663 	__lpfc_mbox_cmpl_put(phba, pmb);
12664 	phba->work_ha |= HA_MBATT;
12665 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12666 	workposted = true;
12667 
12668 send_current_mbox:
12669 	spin_lock_irqsave(&phba->hbalock, iflags);
12670 	/* Release the mailbox command posting token */
12671 	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
12672 	/* Setting active mailbox pointer need to be in sync to flag clear */
12673 	phba->sli.mbox_active = NULL;
12674 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12675 	/* Wake up worker thread to post the next pending mailbox command */
12676 	lpfc_worker_wake_up(phba);
12677 out_no_mqe_complete:
12678 	if (bf_get(lpfc_trailer_consumed, mcqe))
12679 		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
12680 	return workposted;
12681 }
12682 
12683 /**
12684  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
12685  * @phba: Pointer to HBA context object.
12686  * @cqe: Pointer to mailbox completion queue entry.
12687  *
12688  * This routine process a mailbox completion queue entry, it invokes the
12689  * proper mailbox complete handling or asynchrous event handling routine
12690  * according to the MCQE's async bit.
12691  *
12692  * Return: true if work posted to worker thread, otherwise false.
12693  **/
12694 static bool
12695 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
12696 {
12697 	struct lpfc_mcqe mcqe;
12698 	bool workposted;
12699 
12700 	/* Copy the mailbox MCQE and convert endian order as needed */
12701 	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
12702 
12703 	/* Invoke the proper event handling routine */
12704 	if (!bf_get(lpfc_trailer_async, &mcqe))
12705 		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
12706 	else
12707 		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
12708 	return workposted;
12709 }
12710 
12711 /**
12712  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
12713  * @phba: Pointer to HBA context object.
12714  * @cq: Pointer to associated CQ
12715  * @wcqe: Pointer to work-queue completion queue entry.
12716  *
12717  * This routine handles an ELS work-queue completion event.
12718  *
12719  * Return: true if work posted to worker thread, otherwise false.
12720  **/
12721 static bool
12722 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12723 			     struct lpfc_wcqe_complete *wcqe)
12724 {
12725 	struct lpfc_iocbq *irspiocbq;
12726 	unsigned long iflags;
12727 	struct lpfc_sli_ring *pring = cq->pring;
12728 	int txq_cnt = 0;
12729 	int txcmplq_cnt = 0;
12730 	int fcp_txcmplq_cnt = 0;
12731 
12732 	/* Get an irspiocbq for later ELS response processing use */
12733 	irspiocbq = lpfc_sli_get_iocbq(phba);
12734 	if (!irspiocbq) {
12735 		if (!list_empty(&pring->txq))
12736 			txq_cnt++;
12737 		if (!list_empty(&pring->txcmplq))
12738 			txcmplq_cnt++;
12739 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12740 			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
12741 			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
12742 			txq_cnt, phba->iocb_cnt,
12743 			fcp_txcmplq_cnt,
12744 			txcmplq_cnt);
12745 		return false;
12746 	}
12747 
12748 	/* Save off the slow-path queue event for work thread to process */
12749 	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
12750 	spin_lock_irqsave(&phba->hbalock, iflags);
12751 	list_add_tail(&irspiocbq->cq_event.list,
12752 		      &phba->sli4_hba.sp_queue_event);
12753 	phba->hba_flag |= HBA_SP_QUEUE_EVT;
12754 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12755 
12756 	return true;
12757 }
12758 
12759 /**
12760  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
12761  * @phba: Pointer to HBA context object.
12762  * @wcqe: Pointer to work-queue completion queue entry.
12763  *
12764  * This routine handles slow-path WQ entry consumed event by invoking the
12765  * proper WQ release routine to the slow-path WQ.
12766  **/
12767 static void
12768 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
12769 			     struct lpfc_wcqe_release *wcqe)
12770 {
12771 	/* sanity check on queue memory */
12772 	if (unlikely(!phba->sli4_hba.els_wq))
12773 		return;
12774 	/* Check for the slow-path ELS work queue */
12775 	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
12776 		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
12777 				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
12778 	else
12779 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
12780 				"2579 Slow-path wqe consume event carries "
12781 				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
12782 				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
12783 				phba->sli4_hba.els_wq->queue_id);
12784 }
12785 
12786 /**
12787  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
12788  * @phba: Pointer to HBA context object.
12789  * @cq: Pointer to a WQ completion queue.
12790  * @wcqe: Pointer to work-queue completion queue entry.
12791  *
12792  * This routine handles an XRI abort event.
12793  *
12794  * Return: true if work posted to worker thread, otherwise false.
12795  **/
12796 static bool
12797 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
12798 				   struct lpfc_queue *cq,
12799 				   struct sli4_wcqe_xri_aborted *wcqe)
12800 {
12801 	bool workposted = false;
12802 	struct lpfc_cq_event *cq_event;
12803 	unsigned long iflags;
12804 
12805 	/* Allocate a new internal CQ_EVENT entry */
12806 	cq_event = lpfc_sli4_cq_event_alloc(phba);
12807 	if (!cq_event) {
12808 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12809 				"0602 Failed to allocate CQ_EVENT entry\n");
12810 		return false;
12811 	}
12812 
12813 	/* Move the CQE into the proper xri abort event list */
12814 	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
12815 	switch (cq->subtype) {
12816 	case LPFC_FCP:
12817 		spin_lock_irqsave(&phba->hbalock, iflags);
12818 		list_add_tail(&cq_event->list,
12819 			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
12820 		/* Set the fcp xri abort event flag */
12821 		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
12822 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12823 		workposted = true;
12824 		break;
12825 	case LPFC_ELS:
12826 		spin_lock_irqsave(&phba->hbalock, iflags);
12827 		list_add_tail(&cq_event->list,
12828 			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
12829 		/* Set the els xri abort event flag */
12830 		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
12831 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12832 		workposted = true;
12833 		break;
12834 	case LPFC_NVME:
12835 		spin_lock_irqsave(&phba->hbalock, iflags);
12836 		list_add_tail(&cq_event->list,
12837 			      &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
12838 		/* Set the nvme xri abort event flag */
12839 		phba->hba_flag |= NVME_XRI_ABORT_EVENT;
12840 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12841 		workposted = true;
12842 		break;
12843 	default:
12844 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12845 				"0603 Invalid CQ subtype %d: "
12846 				"%08x %08x %08x %08x\n",
12847 				cq->subtype, wcqe->word0, wcqe->parameter,
12848 				wcqe->word2, wcqe->word3);
12849 		lpfc_sli4_cq_event_release(phba, cq_event);
12850 		workposted = false;
12851 		break;
12852 	}
12853 	return workposted;
12854 }
12855 
12856 /**
12857  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
12858  * @phba: Pointer to HBA context object.
12859  * @rcqe: Pointer to receive-queue completion queue entry.
12860  *
12861  * This routine process a receive-queue completion queue entry.
12862  *
12863  * Return: true if work posted to worker thread, otherwise false.
12864  **/
12865 static bool
12866 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
12867 {
12868 	bool workposted = false;
12869 	struct fc_frame_header *fc_hdr;
12870 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
12871 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
12872 	struct lpfc_nvmet_tgtport *tgtp;
12873 	struct hbq_dmabuf *dma_buf;
12874 	uint32_t status, rq_id;
12875 	unsigned long iflags;
12876 
12877 	/* sanity check on queue memory */
12878 	if (unlikely(!hrq) || unlikely(!drq))
12879 		return workposted;
12880 
12881 	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
12882 		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
12883 	else
12884 		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
12885 	if (rq_id != hrq->queue_id)
12886 		goto out;
12887 
12888 	status = bf_get(lpfc_rcqe_status, rcqe);
12889 	switch (status) {
12890 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
12891 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12892 				"2537 Receive Frame Truncated!!\n");
12893 	case FC_STATUS_RQ_SUCCESS:
12894 		lpfc_sli4_rq_release(hrq, drq);
12895 		spin_lock_irqsave(&phba->hbalock, iflags);
12896 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
12897 		if (!dma_buf) {
12898 			hrq->RQ_no_buf_found++;
12899 			spin_unlock_irqrestore(&phba->hbalock, iflags);
12900 			goto out;
12901 		}
12902 		hrq->RQ_rcv_buf++;
12903 		hrq->RQ_buf_posted--;
12904 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
12905 
12906 		/* If a NVME LS event (type 0x28), treat it as Fast path */
12907 		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
12908 
12909 		/* save off the frame for the word thread to process */
12910 		list_add_tail(&dma_buf->cq_event.list,
12911 			      &phba->sli4_hba.sp_queue_event);
12912 		/* Frame received */
12913 		phba->hba_flag |= HBA_SP_QUEUE_EVT;
12914 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12915 		workposted = true;
12916 		break;
12917 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
12918 		if (phba->nvmet_support) {
12919 			tgtp = phba->targetport->private;
12920 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
12921 					"6402 RQE Error x%x, posted %d err_cnt "
12922 					"%d: %x %x %x\n",
12923 					status, hrq->RQ_buf_posted,
12924 					hrq->RQ_no_posted_buf,
12925 					atomic_read(&tgtp->rcv_fcp_cmd_in),
12926 					atomic_read(&tgtp->rcv_fcp_cmd_out),
12927 					atomic_read(&tgtp->xmt_fcp_release));
12928 		}
12929 		/* fallthrough */
12930 
12931 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
12932 		hrq->RQ_no_posted_buf++;
12933 		/* Post more buffers if possible */
12934 		spin_lock_irqsave(&phba->hbalock, iflags);
12935 		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
12936 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12937 		workposted = true;
12938 		break;
12939 	}
12940 out:
12941 	return workposted;
12942 }
12943 
12944 /**
12945  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
12946  * @phba: Pointer to HBA context object.
12947  * @cq: Pointer to the completion queue.
12948  * @wcqe: Pointer to a completion queue entry.
12949  *
12950  * This routine process a slow-path work-queue or receive queue completion queue
12951  * entry.
12952  *
12953  * Return: true if work posted to worker thread, otherwise false.
12954  **/
12955 static bool
12956 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
12957 			 struct lpfc_cqe *cqe)
12958 {
12959 	struct lpfc_cqe cqevt;
12960 	bool workposted = false;
12961 
12962 	/* Copy the work queue CQE and convert endian order if needed */
12963 	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
12964 
12965 	/* Check and process for different type of WCQE and dispatch */
12966 	switch (bf_get(lpfc_cqe_code, &cqevt)) {
12967 	case CQE_CODE_COMPL_WQE:
12968 		/* Process the WQ/RQ complete event */
12969 		phba->last_completion_time = jiffies;
12970 		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
12971 				(struct lpfc_wcqe_complete *)&cqevt);
12972 		break;
12973 	case CQE_CODE_RELEASE_WQE:
12974 		/* Process the WQ release event */
12975 		lpfc_sli4_sp_handle_rel_wcqe(phba,
12976 				(struct lpfc_wcqe_release *)&cqevt);
12977 		break;
12978 	case CQE_CODE_XRI_ABORTED:
12979 		/* Process the WQ XRI abort event */
12980 		phba->last_completion_time = jiffies;
12981 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
12982 				(struct sli4_wcqe_xri_aborted *)&cqevt);
12983 		break;
12984 	case CQE_CODE_RECEIVE:
12985 	case CQE_CODE_RECEIVE_V1:
12986 		/* Process the RQ event */
12987 		phba->last_completion_time = jiffies;
12988 		workposted = lpfc_sli4_sp_handle_rcqe(phba,
12989 				(struct lpfc_rcqe *)&cqevt);
12990 		break;
12991 	default:
12992 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
12993 				"0388 Not a valid WCQE code: x%x\n",
12994 				bf_get(lpfc_cqe_code, &cqevt));
12995 		break;
12996 	}
12997 	return workposted;
12998 }
12999 
13000 /**
13001  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
13002  * @phba: Pointer to HBA context object.
13003  * @eqe: Pointer to fast-path event queue entry.
13004  *
13005  * This routine process a event queue entry from the slow-path event queue.
13006  * It will check the MajorCode and MinorCode to determine this is for a
13007  * completion event on a completion queue, if not, an error shall be logged
13008  * and just return. Otherwise, it will get to the corresponding completion
13009  * queue and process all the entries on that completion queue, rearm the
13010  * completion queue, and then return.
13011  *
13012  **/
13013 static int
13014 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13015 	struct lpfc_queue *speq)
13016 {
13017 	struct lpfc_queue *cq = NULL, *childq;
13018 	struct lpfc_cqe *cqe;
13019 	bool workposted = false;
13020 	int ecount = 0;
13021 	uint16_t cqid;
13022 
13023 	/* Get the reference to the corresponding CQ */
13024 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13025 
13026 	list_for_each_entry(childq, &speq->child_list, list) {
13027 		if (childq->queue_id == cqid) {
13028 			cq = childq;
13029 			break;
13030 		}
13031 	}
13032 	if (unlikely(!cq)) {
13033 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13034 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13035 					"0365 Slow-path CQ identifier "
13036 					"(%d) does not exist\n", cqid);
13037 		return 0;
13038 	}
13039 
13040 	/* Save EQ associated with this CQ */
13041 	cq->assoc_qp = speq;
13042 
13043 	/* Process all the entries to the CQ */
13044 	switch (cq->type) {
13045 	case LPFC_MCQ:
13046 		while ((cqe = lpfc_sli4_cq_get(cq))) {
13047 			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
13048 			if (!(++ecount % cq->entry_repost))
13049 				break;
13050 			cq->CQ_mbox++;
13051 		}
13052 		break;
13053 	case LPFC_WCQ:
13054 		while ((cqe = lpfc_sli4_cq_get(cq))) {
13055 			if ((cq->subtype == LPFC_FCP) ||
13056 			    (cq->subtype == LPFC_NVME))
13057 				workposted |= lpfc_sli4_fp_handle_cqe(phba, cq,
13058 								       cqe);
13059 			else
13060 				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
13061 								      cqe);
13062 			if (!(++ecount % cq->entry_repost))
13063 				break;
13064 		}
13065 
13066 		/* Track the max number of CQEs processed in 1 EQ */
13067 		if (ecount > cq->CQ_max_cqe)
13068 			cq->CQ_max_cqe = ecount;
13069 		break;
13070 	default:
13071 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13072 				"0370 Invalid completion queue type (%d)\n",
13073 				cq->type);
13074 		return 0;
13075 	}
13076 
13077 	/* Catch the no cq entry condition, log an error */
13078 	if (unlikely(ecount == 0))
13079 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13080 				"0371 No entry from the CQ: identifier "
13081 				"(x%x), type (%d)\n", cq->queue_id, cq->type);
13082 
13083 	/* In any case, flash and re-arm the RCQ */
13084 	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13085 
13086 	/* wake up worker thread if there are works to be done */
13087 	if (workposted)
13088 		lpfc_worker_wake_up(phba);
13089 
13090 	return ecount;
13091 }
13092 
13093 /**
13094  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
13095  * @phba: Pointer to HBA context object.
13096  * @cq: Pointer to associated CQ
13097  * @wcqe: Pointer to work-queue completion queue entry.
13098  *
13099  * This routine process a fast-path work queue completion entry from fast-path
13100  * event queue for FCP command response completion.
13101  **/
13102 static void
13103 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13104 			     struct lpfc_wcqe_complete *wcqe)
13105 {
13106 	struct lpfc_sli_ring *pring = cq->pring;
13107 	struct lpfc_iocbq *cmdiocbq;
13108 	struct lpfc_iocbq irspiocbq;
13109 	unsigned long iflags;
13110 
13111 	/* Check for response status */
13112 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
13113 		/* If resource errors reported from HBA, reduce queue
13114 		 * depth of the SCSI device.
13115 		 */
13116 		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
13117 		     IOSTAT_LOCAL_REJECT)) &&
13118 		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
13119 		     IOERR_NO_RESOURCES))
13120 			phba->lpfc_rampdown_queue_depth(phba);
13121 
13122 		/* Log the error status */
13123 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13124 				"0373 FCP complete error: status=x%x, "
13125 				"hw_status=x%x, total_data_specified=%d, "
13126 				"parameter=x%x, word3=x%x\n",
13127 				bf_get(lpfc_wcqe_c_status, wcqe),
13128 				bf_get(lpfc_wcqe_c_hw_status, wcqe),
13129 				wcqe->total_data_placed, wcqe->parameter,
13130 				wcqe->word3);
13131 	}
13132 
13133 	/* Look up the FCP command IOCB and create pseudo response IOCB */
13134 	spin_lock_irqsave(&pring->ring_lock, iflags);
13135 	pring->stats.iocb_event++;
13136 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
13137 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
13138 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
13139 	if (unlikely(!cmdiocbq)) {
13140 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13141 				"0374 FCP complete with no corresponding "
13142 				"cmdiocb: iotag (%d)\n",
13143 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
13144 		return;
13145 	}
13146 
13147 	if (cq->assoc_qp)
13148 		cmdiocbq->isr_timestamp =
13149 			cq->assoc_qp->isr_timestamp;
13150 
13151 	if (cmdiocbq->iocb_cmpl == NULL) {
13152 		if (cmdiocbq->wqe_cmpl) {
13153 			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13154 				spin_lock_irqsave(&phba->hbalock, iflags);
13155 				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13156 				spin_unlock_irqrestore(&phba->hbalock, iflags);
13157 			}
13158 
13159 			/* Pass the cmd_iocb and the wcqe to the upper layer */
13160 			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
13161 			return;
13162 		}
13163 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13164 				"0375 FCP cmdiocb not callback function "
13165 				"iotag: (%d)\n",
13166 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
13167 		return;
13168 	}
13169 
13170 	/* Fake the irspiocb and copy necessary response information */
13171 	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
13172 
13173 	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
13174 		spin_lock_irqsave(&phba->hbalock, iflags);
13175 		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
13176 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13177 	}
13178 
13179 	/* Pass the cmd_iocb and the rsp state to the upper layer */
13180 	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
13181 }
13182 
13183 /**
13184  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
13185  * @phba: Pointer to HBA context object.
13186  * @cq: Pointer to completion queue.
13187  * @wcqe: Pointer to work-queue completion queue entry.
13188  *
13189  * This routine handles an fast-path WQ entry consumed event by invoking the
13190  * proper WQ release routine to the slow-path WQ.
13191  **/
13192 static void
13193 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13194 			     struct lpfc_wcqe_release *wcqe)
13195 {
13196 	struct lpfc_queue *childwq;
13197 	bool wqid_matched = false;
13198 	uint16_t hba_wqid;
13199 
13200 	/* Check for fast-path FCP work queue release */
13201 	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
13202 	list_for_each_entry(childwq, &cq->child_list, list) {
13203 		if (childwq->queue_id == hba_wqid) {
13204 			lpfc_sli4_wq_release(childwq,
13205 					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
13206 			wqid_matched = true;
13207 			break;
13208 		}
13209 	}
13210 	/* Report warning log message if no match found */
13211 	if (wqid_matched != true)
13212 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13213 				"2580 Fast-path wqe consume event carries "
13214 				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
13215 }
13216 
13217 /**
13218  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
13219  * @phba: Pointer to HBA context object.
13220  * @rcqe: Pointer to receive-queue completion queue entry.
13221  *
13222  * This routine process a receive-queue completion queue entry.
13223  *
13224  * Return: true if work posted to worker thread, otherwise false.
13225  **/
13226 static bool
13227 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13228 			    struct lpfc_rcqe *rcqe)
13229 {
13230 	bool workposted = false;
13231 	struct lpfc_queue *hrq;
13232 	struct lpfc_queue *drq;
13233 	struct rqb_dmabuf *dma_buf;
13234 	struct fc_frame_header *fc_hdr;
13235 	struct lpfc_nvmet_tgtport *tgtp;
13236 	uint32_t status, rq_id;
13237 	unsigned long iflags;
13238 	uint32_t fctl, idx;
13239 
13240 	if ((phba->nvmet_support == 0) ||
13241 	    (phba->sli4_hba.nvmet_cqset == NULL))
13242 		return workposted;
13243 
13244 	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
13245 	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
13246 	drq = phba->sli4_hba.nvmet_mrq_data[idx];
13247 
13248 	/* sanity check on queue memory */
13249 	if (unlikely(!hrq) || unlikely(!drq))
13250 		return workposted;
13251 
13252 	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
13253 		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
13254 	else
13255 		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
13256 
13257 	if ((phba->nvmet_support == 0) ||
13258 	    (rq_id != hrq->queue_id))
13259 		return workposted;
13260 
13261 	status = bf_get(lpfc_rcqe_status, rcqe);
13262 	switch (status) {
13263 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
13264 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13265 				"6126 Receive Frame Truncated!!\n");
13266 		/* Drop thru */
13267 	case FC_STATUS_RQ_SUCCESS:
13268 		lpfc_sli4_rq_release(hrq, drq);
13269 		spin_lock_irqsave(&phba->hbalock, iflags);
13270 		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
13271 		if (!dma_buf) {
13272 			hrq->RQ_no_buf_found++;
13273 			spin_unlock_irqrestore(&phba->hbalock, iflags);
13274 			goto out;
13275 		}
13276 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13277 		hrq->RQ_rcv_buf++;
13278 		hrq->RQ_buf_posted--;
13279 		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
13280 
13281 		/* Just some basic sanity checks on FCP Command frame */
13282 		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
13283 		fc_hdr->fh_f_ctl[1] << 8 |
13284 		fc_hdr->fh_f_ctl[2]);
13285 		if (((fctl &
13286 		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
13287 		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
13288 		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
13289 			goto drop;
13290 
13291 		if (fc_hdr->fh_type == FC_TYPE_FCP) {
13292 			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length,  rcqe);
13293 			lpfc_nvmet_unsol_fcp_event(
13294 				phba, idx, dma_buf,
13295 				cq->assoc_qp->isr_timestamp);
13296 			return false;
13297 		}
13298 drop:
13299 		lpfc_in_buf_free(phba, &dma_buf->dbuf);
13300 		break;
13301 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
13302 		if (phba->nvmet_support) {
13303 			tgtp = phba->targetport->private;
13304 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
13305 					"6401 RQE Error x%x, posted %d err_cnt "
13306 					"%d: %x %x %x\n",
13307 					status, hrq->RQ_buf_posted,
13308 					hrq->RQ_no_posted_buf,
13309 					atomic_read(&tgtp->rcv_fcp_cmd_in),
13310 					atomic_read(&tgtp->rcv_fcp_cmd_out),
13311 					atomic_read(&tgtp->xmt_fcp_release));
13312 		}
13313 		/* fallthrough */
13314 
13315 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
13316 		hrq->RQ_no_posted_buf++;
13317 		/* Post more buffers if possible */
13318 		break;
13319 	}
13320 out:
13321 	return workposted;
13322 }
13323 
13324 /**
13325  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
13326  * @cq: Pointer to the completion queue.
13327  * @eqe: Pointer to fast-path completion queue entry.
13328  *
13329  * This routine process a fast-path work queue completion entry from fast-path
13330  * event queue for FCP command response completion.
13331  **/
13332 static int
13333 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
13334 			 struct lpfc_cqe *cqe)
13335 {
13336 	struct lpfc_wcqe_release wcqe;
13337 	bool workposted = false;
13338 
13339 	/* Copy the work queue CQE and convert endian order if needed */
13340 	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
13341 
13342 	/* Check and process for different type of WCQE and dispatch */
13343 	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
13344 	case CQE_CODE_COMPL_WQE:
13345 	case CQE_CODE_NVME_ERSP:
13346 		cq->CQ_wq++;
13347 		/* Process the WQ complete event */
13348 		phba->last_completion_time = jiffies;
13349 		if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME))
13350 			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13351 				(struct lpfc_wcqe_complete *)&wcqe);
13352 		if (cq->subtype == LPFC_NVME_LS)
13353 			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
13354 				(struct lpfc_wcqe_complete *)&wcqe);
13355 		break;
13356 	case CQE_CODE_RELEASE_WQE:
13357 		cq->CQ_release_wqe++;
13358 		/* Process the WQ release event */
13359 		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
13360 				(struct lpfc_wcqe_release *)&wcqe);
13361 		break;
13362 	case CQE_CODE_XRI_ABORTED:
13363 		cq->CQ_xri_aborted++;
13364 		/* Process the WQ XRI abort event */
13365 		phba->last_completion_time = jiffies;
13366 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
13367 				(struct sli4_wcqe_xri_aborted *)&wcqe);
13368 		break;
13369 	case CQE_CODE_RECEIVE_V1:
13370 	case CQE_CODE_RECEIVE:
13371 		phba->last_completion_time = jiffies;
13372 		if (cq->subtype == LPFC_NVMET) {
13373 			workposted = lpfc_sli4_nvmet_handle_rcqe(
13374 				phba, cq, (struct lpfc_rcqe *)&wcqe);
13375 		}
13376 		break;
13377 	default:
13378 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13379 				"0144 Not a valid CQE code: x%x\n",
13380 				bf_get(lpfc_wcqe_c_code, &wcqe));
13381 		break;
13382 	}
13383 	return workposted;
13384 }
13385 
13386 /**
13387  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
13388  * @phba: Pointer to HBA context object.
13389  * @eqe: Pointer to fast-path event queue entry.
13390  *
13391  * This routine process a event queue entry from the fast-path event queue.
13392  * It will check the MajorCode and MinorCode to determine this is for a
13393  * completion event on a completion queue, if not, an error shall be logged
13394  * and just return. Otherwise, it will get to the corresponding completion
13395  * queue and process all the entries on the completion queue, rearm the
13396  * completion queue, and then return.
13397  **/
13398 static int
13399 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
13400 			uint32_t qidx)
13401 {
13402 	struct lpfc_queue *cq = NULL;
13403 	struct lpfc_cqe *cqe;
13404 	bool workposted = false;
13405 	uint16_t cqid, id;
13406 	int ecount = 0;
13407 
13408 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13409 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13410 				"0366 Not a valid completion "
13411 				"event: majorcode=x%x, minorcode=x%x\n",
13412 				bf_get_le32(lpfc_eqe_major_code, eqe),
13413 				bf_get_le32(lpfc_eqe_minor_code, eqe));
13414 		return 0;
13415 	}
13416 
13417 	/* Get the reference to the corresponding CQ */
13418 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13419 
13420 	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
13421 		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
13422 		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
13423 			/* Process NVMET unsol rcv */
13424 			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
13425 			goto  process_cq;
13426 		}
13427 	}
13428 
13429 	if (phba->sli4_hba.nvme_cq_map &&
13430 	    (cqid == phba->sli4_hba.nvme_cq_map[qidx])) {
13431 		/* Process NVME / NVMET command completion */
13432 		cq = phba->sli4_hba.nvme_cq[qidx];
13433 		goto  process_cq;
13434 	}
13435 
13436 	if (phba->sli4_hba.fcp_cq_map &&
13437 	    (cqid == phba->sli4_hba.fcp_cq_map[qidx])) {
13438 		/* Process FCP command completion */
13439 		cq = phba->sli4_hba.fcp_cq[qidx];
13440 		goto  process_cq;
13441 	}
13442 
13443 	if (phba->sli4_hba.nvmels_cq &&
13444 	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
13445 		/* Process NVME unsol rcv */
13446 		cq = phba->sli4_hba.nvmels_cq;
13447 	}
13448 
13449 	/* Otherwise this is a Slow path event */
13450 	if (cq == NULL) {
13451 		ecount = lpfc_sli4_sp_handle_eqe(phba, eqe,
13452 						 phba->sli4_hba.hba_eq[qidx]);
13453 		return ecount;
13454 	}
13455 
13456 process_cq:
13457 	if (unlikely(cqid != cq->queue_id)) {
13458 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13459 				"0368 Miss-matched fast-path completion "
13460 				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
13461 				cqid, cq->queue_id);
13462 		return 0;
13463 	}
13464 
13465 	/* Save EQ associated with this CQ */
13466 	cq->assoc_qp = phba->sli4_hba.hba_eq[qidx];
13467 
13468 	/* Process all the entries to the CQ */
13469 	while ((cqe = lpfc_sli4_cq_get(cq))) {
13470 		workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13471 		if (!(++ecount % cq->entry_repost))
13472 			break;
13473 	}
13474 
13475 	/* Track the max number of CQEs processed in 1 EQ */
13476 	if (ecount > cq->CQ_max_cqe)
13477 		cq->CQ_max_cqe = ecount;
13478 	cq->assoc_qp->EQ_cqe_cnt += ecount;
13479 
13480 	/* Catch the no cq entry condition */
13481 	if (unlikely(ecount == 0))
13482 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13483 				"0369 No entry from fast-path completion "
13484 				"queue fcpcqid=%d\n", cq->queue_id);
13485 
13486 	/* In any case, flash and re-arm the CQ */
13487 	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13488 
13489 	/* wake up worker thread if there are works to be done */
13490 	if (workposted)
13491 		lpfc_worker_wake_up(phba);
13492 
13493 	return ecount;
13494 }
13495 
13496 static void
13497 lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
13498 {
13499 	struct lpfc_eqe *eqe;
13500 
13501 	/* walk all the EQ entries and drop on the floor */
13502 	while ((eqe = lpfc_sli4_eq_get(eq)))
13503 		;
13504 
13505 	/* Clear and re-arm the EQ */
13506 	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13507 }
13508 
13509 
13510 /**
13511  * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
13512  *			     entry
13513  * @phba: Pointer to HBA context object.
13514  * @eqe: Pointer to fast-path event queue entry.
13515  *
13516  * This routine process a event queue entry from the Flash Optimized Fabric
13517  * event queue.  It will check the MajorCode and MinorCode to determine this
13518  * is for a completion event on a completion queue, if not, an error shall be
13519  * logged and just return. Otherwise, it will get to the corresponding
13520  * completion queue and process all the entries on the completion queue, rearm
13521  * the completion queue, and then return.
13522  **/
13523 static void
13524 lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
13525 {
13526 	struct lpfc_queue *cq;
13527 	struct lpfc_cqe *cqe;
13528 	bool workposted = false;
13529 	uint16_t cqid;
13530 	int ecount = 0;
13531 
13532 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
13533 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13534 				"9147 Not a valid completion "
13535 				"event: majorcode=x%x, minorcode=x%x\n",
13536 				bf_get_le32(lpfc_eqe_major_code, eqe),
13537 				bf_get_le32(lpfc_eqe_minor_code, eqe));
13538 		return;
13539 	}
13540 
13541 	/* Get the reference to the corresponding CQ */
13542 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
13543 
13544 	/* Next check for OAS */
13545 	cq = phba->sli4_hba.oas_cq;
13546 	if (unlikely(!cq)) {
13547 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
13548 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13549 					"9148 OAS completion queue "
13550 					"does not exist\n");
13551 		return;
13552 	}
13553 
13554 	if (unlikely(cqid != cq->queue_id)) {
13555 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13556 				"9149 Miss-matched fast-path compl "
13557 				"queue id: eqcqid=%d, fcpcqid=%d\n",
13558 				cqid, cq->queue_id);
13559 		return;
13560 	}
13561 
13562 	/* Save EQ associated with this CQ */
13563 	cq->assoc_qp = phba->sli4_hba.fof_eq;
13564 
13565 	/* Process all the entries to the OAS CQ */
13566 	while ((cqe = lpfc_sli4_cq_get(cq))) {
13567 		workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
13568 		if (!(++ecount % cq->entry_repost))
13569 			break;
13570 	}
13571 
13572 	/* Track the max number of CQEs processed in 1 EQ */
13573 	if (ecount > cq->CQ_max_cqe)
13574 		cq->CQ_max_cqe = ecount;
13575 	cq->assoc_qp->EQ_cqe_cnt += ecount;
13576 
13577 	/* Catch the no cq entry condition */
13578 	if (unlikely(ecount == 0))
13579 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13580 				"9153 No entry from fast-path completion "
13581 				"queue fcpcqid=%d\n", cq->queue_id);
13582 
13583 	/* In any case, flash and re-arm the CQ */
13584 	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
13585 
13586 	/* wake up worker thread if there are works to be done */
13587 	if (workposted)
13588 		lpfc_worker_wake_up(phba);
13589 }
13590 
13591 /**
13592  * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
13593  * @irq: Interrupt number.
13594  * @dev_id: The device context pointer.
13595  *
13596  * This function is directly called from the PCI layer as an interrupt
13597  * service routine when device with SLI-4 interface spec is enabled with
13598  * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
13599  * IOCB ring event in the HBA. However, when the device is enabled with either
13600  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13601  * device-level interrupt handler. When the PCI slot is in error recovery
13602  * or the HBA is undergoing initialization, the interrupt handler will not
13603  * process the interrupt. The Flash Optimized Fabric ring event are handled in
13604  * the intrrupt context. This function is called without any lock held.
13605  * It gets the hbalock to access and update SLI data structures. Note that,
13606  * the EQ to CQ are one-to-one map such that the EQ index is
13607  * equal to that of CQ index.
13608  *
13609  * This function returns IRQ_HANDLED when interrupt is handled else it
13610  * returns IRQ_NONE.
13611  **/
13612 irqreturn_t
13613 lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
13614 {
13615 	struct lpfc_hba *phba;
13616 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
13617 	struct lpfc_queue *eq;
13618 	struct lpfc_eqe *eqe;
13619 	unsigned long iflag;
13620 	int ecount = 0;
13621 
13622 	/* Get the driver's phba structure from the dev_id */
13623 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13624 	phba = hba_eq_hdl->phba;
13625 
13626 	if (unlikely(!phba))
13627 		return IRQ_NONE;
13628 
13629 	/* Get to the EQ struct associated with this vector */
13630 	eq = phba->sli4_hba.fof_eq;
13631 	if (unlikely(!eq))
13632 		return IRQ_NONE;
13633 
13634 	/* Check device state for handling interrupt */
13635 	if (unlikely(lpfc_intr_state_check(phba))) {
13636 		/* Check again for link_state with lock held */
13637 		spin_lock_irqsave(&phba->hbalock, iflag);
13638 		if (phba->link_state < LPFC_LINK_DOWN)
13639 			/* Flush, clear interrupt, and rearm the EQ */
13640 			lpfc_sli4_eq_flush(phba, eq);
13641 		spin_unlock_irqrestore(&phba->hbalock, iflag);
13642 		return IRQ_NONE;
13643 	}
13644 
13645 	/*
13646 	 * Process all the event on FCP fast-path EQ
13647 	 */
13648 	while ((eqe = lpfc_sli4_eq_get(eq))) {
13649 		lpfc_sli4_fof_handle_eqe(phba, eqe);
13650 		if (!(++ecount % eq->entry_repost))
13651 			break;
13652 		eq->EQ_processed++;
13653 	}
13654 
13655 	/* Track the max number of EQEs processed in 1 intr */
13656 	if (ecount > eq->EQ_max_eqe)
13657 		eq->EQ_max_eqe = ecount;
13658 
13659 
13660 	if (unlikely(ecount == 0)) {
13661 		eq->EQ_no_entry++;
13662 
13663 		if (phba->intr_type == MSIX)
13664 			/* MSI-X treated interrupt served as no EQ share INT */
13665 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13666 					"9145 MSI-X interrupt with no EQE\n");
13667 		else {
13668 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
13669 					"9146 ISR interrupt with no EQE\n");
13670 			/* Non MSI-X treated on interrupt as EQ share INT */
13671 			return IRQ_NONE;
13672 		}
13673 	}
13674 	/* Always clear and re-arm the fast-path EQ */
13675 	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
13676 	return IRQ_HANDLED;
13677 }
13678 
13679 /**
13680  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
13681  * @irq: Interrupt number.
13682  * @dev_id: The device context pointer.
13683  *
13684  * This function is directly called from the PCI layer as an interrupt
13685  * service routine when device with SLI-4 interface spec is enabled with
13686  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13687  * ring event in the HBA. However, when the device is enabled with either
13688  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13689  * device-level interrupt handler. When the PCI slot is in error recovery
13690  * or the HBA is undergoing initialization, the interrupt handler will not
13691  * process the interrupt. The SCSI FCP fast-path ring event are handled in
13692  * the intrrupt context. This function is called without any lock held.
13693  * It gets the hbalock to access and update SLI data structures. Note that,
13694  * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
13695  * equal to that of FCP CQ index.
13696  *
13697  * The link attention and ELS ring attention events are handled
13698  * by the worker thread. The interrupt handler signals the worker thread
13699  * and returns for these events. This function is called without any lock
13700  * held. It gets the hbalock to access and update SLI data structures.
13701  *
13702  * This function returns IRQ_HANDLED when interrupt is handled else it
13703  * returns IRQ_NONE.
13704  **/
13705 irqreturn_t
13706 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
13707 {
13708 	struct lpfc_hba *phba;
13709 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
13710 	struct lpfc_queue *fpeq;
13711 	struct lpfc_eqe *eqe;
13712 	unsigned long iflag;
13713 	int ecount = 0;
13714 	int ccount = 0;
13715 	int hba_eqidx;
13716 
13717 	/* Get the driver's phba structure from the dev_id */
13718 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
13719 	phba = hba_eq_hdl->phba;
13720 	hba_eqidx = hba_eq_hdl->idx;
13721 
13722 	if (unlikely(!phba))
13723 		return IRQ_NONE;
13724 	if (unlikely(!phba->sli4_hba.hba_eq))
13725 		return IRQ_NONE;
13726 
13727 	/* Get to the EQ struct associated with this vector */
13728 	fpeq = phba->sli4_hba.hba_eq[hba_eqidx];
13729 	if (unlikely(!fpeq))
13730 		return IRQ_NONE;
13731 
13732 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
13733 	if (phba->ktime_on)
13734 		fpeq->isr_timestamp = ktime_get_ns();
13735 #endif
13736 
13737 	if (lpfc_fcp_look_ahead) {
13738 		if (atomic_dec_and_test(&hba_eq_hdl->hba_eq_in_use))
13739 			lpfc_sli4_eq_clr_intr(fpeq);
13740 		else {
13741 			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13742 			return IRQ_NONE;
13743 		}
13744 	}
13745 
13746 	/* Check device state for handling interrupt */
13747 	if (unlikely(lpfc_intr_state_check(phba))) {
13748 		/* Check again for link_state with lock held */
13749 		spin_lock_irqsave(&phba->hbalock, iflag);
13750 		if (phba->link_state < LPFC_LINK_DOWN)
13751 			/* Flush, clear interrupt, and rearm the EQ */
13752 			lpfc_sli4_eq_flush(phba, fpeq);
13753 		spin_unlock_irqrestore(&phba->hbalock, iflag);
13754 		if (lpfc_fcp_look_ahead)
13755 			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13756 		return IRQ_NONE;
13757 	}
13758 
13759 	/*
13760 	 * Process all the event on FCP fast-path EQ
13761 	 */
13762 	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
13763 		if (eqe == NULL)
13764 			break;
13765 
13766 		ccount += lpfc_sli4_hba_handle_eqe(phba, eqe, hba_eqidx);
13767 		if (!(++ecount % fpeq->entry_repost) ||
13768 		    ccount > LPFC_MAX_ISR_CQE)
13769 			break;
13770 		fpeq->EQ_processed++;
13771 	}
13772 
13773 	/* Track the max number of EQEs processed in 1 intr */
13774 	if (ecount > fpeq->EQ_max_eqe)
13775 		fpeq->EQ_max_eqe = ecount;
13776 
13777 	/* Always clear and re-arm the fast-path EQ */
13778 	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
13779 
13780 	if (unlikely(ecount == 0)) {
13781 		fpeq->EQ_no_entry++;
13782 
13783 		if (lpfc_fcp_look_ahead) {
13784 			atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13785 			return IRQ_NONE;
13786 		}
13787 
13788 		if (phba->intr_type == MSIX)
13789 			/* MSI-X treated interrupt served as no EQ share INT */
13790 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
13791 					"0358 MSI-X interrupt with no EQE\n");
13792 		else
13793 			/* Non MSI-X treated on interrupt as EQ share INT */
13794 			return IRQ_NONE;
13795 	}
13796 
13797 	if (lpfc_fcp_look_ahead)
13798 		atomic_inc(&hba_eq_hdl->hba_eq_in_use);
13799 
13800 	return IRQ_HANDLED;
13801 } /* lpfc_sli4_fp_intr_handler */
13802 
13803 /**
13804  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
13805  * @irq: Interrupt number.
13806  * @dev_id: The device context pointer.
13807  *
13808  * This function is the device-level interrupt handler to device with SLI-4
13809  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
13810  * interrupt mode is enabled and there is an event in the HBA which requires
13811  * driver attention. This function invokes the slow-path interrupt attention
13812  * handling function and fast-path interrupt attention handling function in
13813  * turn to process the relevant HBA attention events. This function is called
13814  * without any lock held. It gets the hbalock to access and update SLI data
13815  * structures.
13816  *
13817  * This function returns IRQ_HANDLED when interrupt is handled, else it
13818  * returns IRQ_NONE.
13819  **/
13820 irqreturn_t
13821 lpfc_sli4_intr_handler(int irq, void *dev_id)
13822 {
13823 	struct lpfc_hba  *phba;
13824 	irqreturn_t hba_irq_rc;
13825 	bool hba_handled = false;
13826 	int qidx;
13827 
13828 	/* Get the driver's phba structure from the dev_id */
13829 	phba = (struct lpfc_hba *)dev_id;
13830 
13831 	if (unlikely(!phba))
13832 		return IRQ_NONE;
13833 
13834 	/*
13835 	 * Invoke fast-path host attention interrupt handling as appropriate.
13836 	 */
13837 	for (qidx = 0; qidx < phba->io_channel_irqs; qidx++) {
13838 		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
13839 					&phba->sli4_hba.hba_eq_hdl[qidx]);
13840 		if (hba_irq_rc == IRQ_HANDLED)
13841 			hba_handled |= true;
13842 	}
13843 
13844 	if (phba->cfg_fof) {
13845 		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
13846 					&phba->sli4_hba.hba_eq_hdl[qidx]);
13847 		if (hba_irq_rc == IRQ_HANDLED)
13848 			hba_handled |= true;
13849 	}
13850 
13851 	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
13852 } /* lpfc_sli4_intr_handler */
13853 
13854 /**
13855  * lpfc_sli4_queue_free - free a queue structure and associated memory
13856  * @queue: The queue structure to free.
13857  *
13858  * This function frees a queue structure and the DMAable memory used for
13859  * the host resident queue. This function must be called after destroying the
13860  * queue on the HBA.
13861  **/
13862 void
13863 lpfc_sli4_queue_free(struct lpfc_queue *queue)
13864 {
13865 	struct lpfc_dmabuf *dmabuf;
13866 
13867 	if (!queue)
13868 		return;
13869 
13870 	while (!list_empty(&queue->page_list)) {
13871 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
13872 				 list);
13873 		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
13874 				  dmabuf->virt, dmabuf->phys);
13875 		kfree(dmabuf);
13876 	}
13877 	if (queue->rqbp) {
13878 		lpfc_free_rq_buffer(queue->phba, queue);
13879 		kfree(queue->rqbp);
13880 	}
13881 
13882 	if (!list_empty(&queue->wq_list))
13883 		list_del(&queue->wq_list);
13884 
13885 	kfree(queue);
13886 	return;
13887 }
13888 
13889 /**
13890  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
13891  * @phba: The HBA that this queue is being created on.
13892  * @entry_size: The size of each queue entry for this queue.
13893  * @entry count: The number of entries that this queue will handle.
13894  *
13895  * This function allocates a queue structure and the DMAable memory used for
13896  * the host resident queue. This function must be called before creating the
13897  * queue on the HBA.
13898  **/
13899 struct lpfc_queue *
13900 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
13901 		      uint32_t entry_count)
13902 {
13903 	struct lpfc_queue *queue;
13904 	struct lpfc_dmabuf *dmabuf;
13905 	int x, total_qe_count;
13906 	void *dma_pointer;
13907 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
13908 
13909 	if (!phba->sli4_hba.pc_sli4_params.supported)
13910 		hw_page_size = SLI4_PAGE_SIZE;
13911 
13912 	queue = kzalloc(sizeof(struct lpfc_queue) +
13913 			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
13914 	if (!queue)
13915 		return NULL;
13916 	queue->page_count = (ALIGN(entry_size * entry_count,
13917 			hw_page_size))/hw_page_size;
13918 
13919 	/* If needed, Adjust page count to match the max the adapter supports */
13920 	if (queue->page_count > phba->sli4_hba.pc_sli4_params.wqpcnt)
13921 		queue->page_count = phba->sli4_hba.pc_sli4_params.wqpcnt;
13922 
13923 	INIT_LIST_HEAD(&queue->list);
13924 	INIT_LIST_HEAD(&queue->wq_list);
13925 	INIT_LIST_HEAD(&queue->page_list);
13926 	INIT_LIST_HEAD(&queue->child_list);
13927 	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
13928 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
13929 		if (!dmabuf)
13930 			goto out_fail;
13931 		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
13932 						   hw_page_size, &dmabuf->phys,
13933 						   GFP_KERNEL);
13934 		if (!dmabuf->virt) {
13935 			kfree(dmabuf);
13936 			goto out_fail;
13937 		}
13938 		dmabuf->buffer_tag = x;
13939 		list_add_tail(&dmabuf->list, &queue->page_list);
13940 		/* initialize queue's entry array */
13941 		dma_pointer = dmabuf->virt;
13942 		for (; total_qe_count < entry_count &&
13943 		     dma_pointer < (hw_page_size + dmabuf->virt);
13944 		     total_qe_count++, dma_pointer += entry_size) {
13945 			queue->qe[total_qe_count].address = dma_pointer;
13946 		}
13947 	}
13948 	queue->entry_size = entry_size;
13949 	queue->entry_count = entry_count;
13950 	queue->phba = phba;
13951 
13952 	/* entry_repost will be set during q creation */
13953 
13954 	return queue;
13955 out_fail:
13956 	lpfc_sli4_queue_free(queue);
13957 	return NULL;
13958 }
13959 
13960 /**
13961  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
13962  * @phba: HBA structure that indicates port to create a queue on.
13963  * @pci_barset: PCI BAR set flag.
13964  *
13965  * This function shall perform iomap of the specified PCI BAR address to host
13966  * memory address if not already done so and return it. The returned host
13967  * memory address can be NULL.
13968  */
13969 static void __iomem *
13970 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
13971 {
13972 	if (!phba->pcidev)
13973 		return NULL;
13974 
13975 	switch (pci_barset) {
13976 	case WQ_PCI_BAR_0_AND_1:
13977 		return phba->pci_bar0_memmap_p;
13978 	case WQ_PCI_BAR_2_AND_3:
13979 		return phba->pci_bar2_memmap_p;
13980 	case WQ_PCI_BAR_4_AND_5:
13981 		return phba->pci_bar4_memmap_p;
13982 	default:
13983 		break;
13984 	}
13985 	return NULL;
13986 }
13987 
13988 /**
13989  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on FCP EQs
13990  * @phba: HBA structure that indicates port to create a queue on.
13991  * @startq: The starting FCP EQ to modify
13992  *
13993  * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
13994  * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
13995  * updated in one mailbox command.
13996  *
13997  * The @phba struct is used to send mailbox command to HBA. The @startq
13998  * is used to get the starting FCP EQ to change.
13999  * This function is asynchronous and will wait for the mailbox
14000  * command to finish before continuing.
14001  *
14002  * On success this function will return a zero. If unable to allocate enough
14003  * memory this function will return -ENOMEM. If the queue create mailbox command
14004  * fails this function will return -ENXIO.
14005  **/
14006 int
14007 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
14008 			 uint32_t numq, uint32_t imax)
14009 {
14010 	struct lpfc_mbx_modify_eq_delay *eq_delay;
14011 	LPFC_MBOXQ_t *mbox;
14012 	struct lpfc_queue *eq;
14013 	int cnt, rc, length, status = 0;
14014 	uint32_t shdr_status, shdr_add_status;
14015 	uint32_t result, val;
14016 	int qidx;
14017 	union lpfc_sli4_cfg_shdr *shdr;
14018 	uint16_t dmult;
14019 
14020 	if (startq >= phba->io_channel_irqs)
14021 		return 0;
14022 
14023 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14024 	if (!mbox)
14025 		return -ENOMEM;
14026 	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
14027 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14028 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14029 			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
14030 			 length, LPFC_SLI4_MBX_EMBED);
14031 	eq_delay = &mbox->u.mqe.un.eq_delay;
14032 
14033 	/* Calculate delay multiper from maximum interrupt per second */
14034 	result = imax / phba->io_channel_irqs;
14035 	if (result > LPFC_DMULT_CONST || result == 0)
14036 		dmult = 0;
14037 	else
14038 		dmult = LPFC_DMULT_CONST/result - 1;
14039 	if (dmult > LPFC_DMULT_MAX)
14040 		dmult = LPFC_DMULT_MAX;
14041 
14042 	cnt = 0;
14043 	for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
14044 		eq = phba->sli4_hba.hba_eq[qidx];
14045 		if (!eq)
14046 			continue;
14047 		eq->q_mode = imax;
14048 		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
14049 		eq_delay->u.request.eq[cnt].phase = 0;
14050 		eq_delay->u.request.eq[cnt].delay_multi = dmult;
14051 		cnt++;
14052 
14053 		/* q_mode is only used for auto_imax */
14054 		if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
14055 			/* Use EQ Delay Register method for q_mode */
14056 
14057 			/* Convert for EQ Delay register */
14058 			val =  phba->cfg_fcp_imax;
14059 			if (val) {
14060 				/* First, interrupts per sec per EQ */
14061 				val = phba->cfg_fcp_imax /
14062 					phba->io_channel_irqs;
14063 
14064 				/* us delay between each interrupt */
14065 				val = LPFC_SEC_TO_USEC / val;
14066 			}
14067 			eq->q_mode = val;
14068 		} else {
14069 			eq->q_mode = imax;
14070 		}
14071 
14072 		if (cnt >= numq)
14073 			break;
14074 	}
14075 	eq_delay->u.request.num_eq = cnt;
14076 
14077 	mbox->vport = phba->pport;
14078 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14079 	mbox->context1 = NULL;
14080 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14081 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
14082 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14083 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14084 	if (shdr_status || shdr_add_status || rc) {
14085 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14086 				"2512 MODIFY_EQ_DELAY mailbox failed with "
14087 				"status x%x add_status x%x, mbx status x%x\n",
14088 				shdr_status, shdr_add_status, rc);
14089 		status = -ENXIO;
14090 	}
14091 	mempool_free(mbox, phba->mbox_mem_pool);
14092 	return status;
14093 }
14094 
14095 /**
14096  * lpfc_eq_create - Create an Event Queue on the HBA
14097  * @phba: HBA structure that indicates port to create a queue on.
14098  * @eq: The queue structure to use to create the event queue.
14099  * @imax: The maximum interrupt per second limit.
14100  *
14101  * This function creates an event queue, as detailed in @eq, on a port,
14102  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
14103  *
14104  * The @phba struct is used to send mailbox command to HBA. The @eq struct
14105  * is used to get the entry count and entry size that are necessary to
14106  * determine the number of pages to allocate and use for this queue. This
14107  * function will send the EQ_CREATE mailbox command to the HBA to setup the
14108  * event queue. This function is asynchronous and will wait for the mailbox
14109  * command to finish before continuing.
14110  *
14111  * On success this function will return a zero. If unable to allocate enough
14112  * memory this function will return -ENOMEM. If the queue create mailbox command
14113  * fails this function will return -ENXIO.
14114  **/
14115 int
14116 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
14117 {
14118 	struct lpfc_mbx_eq_create *eq_create;
14119 	LPFC_MBOXQ_t *mbox;
14120 	int rc, length, status = 0;
14121 	struct lpfc_dmabuf *dmabuf;
14122 	uint32_t shdr_status, shdr_add_status;
14123 	union lpfc_sli4_cfg_shdr *shdr;
14124 	uint16_t dmult;
14125 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14126 
14127 	/* sanity check on queue memory */
14128 	if (!eq)
14129 		return -ENODEV;
14130 	if (!phba->sli4_hba.pc_sli4_params.supported)
14131 		hw_page_size = SLI4_PAGE_SIZE;
14132 
14133 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14134 	if (!mbox)
14135 		return -ENOMEM;
14136 	length = (sizeof(struct lpfc_mbx_eq_create) -
14137 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14138 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14139 			 LPFC_MBOX_OPCODE_EQ_CREATE,
14140 			 length, LPFC_SLI4_MBX_EMBED);
14141 	eq_create = &mbox->u.mqe.un.eq_create;
14142 	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
14143 	       eq->page_count);
14144 	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
14145 	       LPFC_EQE_SIZE);
14146 	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
14147 	/* don't setup delay multiplier using EQ_CREATE */
14148 	dmult = 0;
14149 	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
14150 	       dmult);
14151 	switch (eq->entry_count) {
14152 	default:
14153 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14154 				"0360 Unsupported EQ count. (%d)\n",
14155 				eq->entry_count);
14156 		if (eq->entry_count < 256)
14157 			return -EINVAL;
14158 		/* otherwise default to smallest count (drop through) */
14159 	case 256:
14160 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14161 		       LPFC_EQ_CNT_256);
14162 		break;
14163 	case 512:
14164 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14165 		       LPFC_EQ_CNT_512);
14166 		break;
14167 	case 1024:
14168 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14169 		       LPFC_EQ_CNT_1024);
14170 		break;
14171 	case 2048:
14172 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14173 		       LPFC_EQ_CNT_2048);
14174 		break;
14175 	case 4096:
14176 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
14177 		       LPFC_EQ_CNT_4096);
14178 		break;
14179 	}
14180 	list_for_each_entry(dmabuf, &eq->page_list, list) {
14181 		memset(dmabuf->virt, 0, hw_page_size);
14182 		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14183 					putPaddrLow(dmabuf->phys);
14184 		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14185 					putPaddrHigh(dmabuf->phys);
14186 	}
14187 	mbox->vport = phba->pport;
14188 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
14189 	mbox->context1 = NULL;
14190 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14191 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
14192 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14193 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14194 	if (shdr_status || shdr_add_status || rc) {
14195 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14196 				"2500 EQ_CREATE mailbox failed with "
14197 				"status x%x add_status x%x, mbx status x%x\n",
14198 				shdr_status, shdr_add_status, rc);
14199 		status = -ENXIO;
14200 	}
14201 	eq->type = LPFC_EQ;
14202 	eq->subtype = LPFC_NONE;
14203 	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
14204 	if (eq->queue_id == 0xFFFF)
14205 		status = -ENXIO;
14206 	eq->host_index = 0;
14207 	eq->hba_index = 0;
14208 	eq->entry_repost = LPFC_EQ_REPOST;
14209 
14210 	mempool_free(mbox, phba->mbox_mem_pool);
14211 	return status;
14212 }
14213 
14214 /**
14215  * lpfc_cq_create - Create a Completion Queue on the HBA
14216  * @phba: HBA structure that indicates port to create a queue on.
14217  * @cq: The queue structure to use to create the completion queue.
14218  * @eq: The event queue to bind this completion queue to.
14219  *
14220  * This function creates a completion queue, as detailed in @wq, on a port,
14221  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
14222  *
14223  * The @phba struct is used to send mailbox command to HBA. The @cq struct
14224  * is used to get the entry count and entry size that are necessary to
14225  * determine the number of pages to allocate and use for this queue. The @eq
14226  * is used to indicate which event queue to bind this completion queue to. This
14227  * function will send the CQ_CREATE mailbox command to the HBA to setup the
14228  * completion queue. This function is asynchronous and will wait for the mailbox
14229  * command to finish before continuing.
14230  *
14231  * On success this function will return a zero. If unable to allocate enough
14232  * memory this function will return -ENOMEM. If the queue create mailbox command
14233  * fails this function will return -ENXIO.
14234  **/
14235 int
14236 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
14237 	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
14238 {
14239 	struct lpfc_mbx_cq_create *cq_create;
14240 	struct lpfc_dmabuf *dmabuf;
14241 	LPFC_MBOXQ_t *mbox;
14242 	int rc, length, status = 0;
14243 	uint32_t shdr_status, shdr_add_status;
14244 	union lpfc_sli4_cfg_shdr *shdr;
14245 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14246 
14247 	/* sanity check on queue memory */
14248 	if (!cq || !eq)
14249 		return -ENODEV;
14250 	if (!phba->sli4_hba.pc_sli4_params.supported)
14251 		hw_page_size = SLI4_PAGE_SIZE;
14252 
14253 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14254 	if (!mbox)
14255 		return -ENOMEM;
14256 	length = (sizeof(struct lpfc_mbx_cq_create) -
14257 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14258 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14259 			 LPFC_MBOX_OPCODE_CQ_CREATE,
14260 			 length, LPFC_SLI4_MBX_EMBED);
14261 	cq_create = &mbox->u.mqe.un.cq_create;
14262 	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
14263 	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
14264 		    cq->page_count);
14265 	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
14266 	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
14267 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
14268 	       phba->sli4_hba.pc_sli4_params.cqv);
14269 	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
14270 		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
14271 		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
14272 		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
14273 		       eq->queue_id);
14274 	} else {
14275 		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
14276 		       eq->queue_id);
14277 	}
14278 	switch (cq->entry_count) {
14279 	default:
14280 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14281 				"0361 Unsupported CQ count: "
14282 				"entry cnt %d sz %d pg cnt %d\n",
14283 				cq->entry_count, cq->entry_size,
14284 				cq->page_count);
14285 		if (cq->entry_count < 256) {
14286 			status = -EINVAL;
14287 			goto out;
14288 		}
14289 		/* otherwise default to smallest count (drop through) */
14290 	case 256:
14291 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14292 		       LPFC_CQ_CNT_256);
14293 		break;
14294 	case 512:
14295 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14296 		       LPFC_CQ_CNT_512);
14297 		break;
14298 	case 1024:
14299 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
14300 		       LPFC_CQ_CNT_1024);
14301 		break;
14302 	}
14303 	list_for_each_entry(dmabuf, &cq->page_list, list) {
14304 		memset(dmabuf->virt, 0, hw_page_size);
14305 		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14306 					putPaddrLow(dmabuf->phys);
14307 		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14308 					putPaddrHigh(dmabuf->phys);
14309 	}
14310 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14311 
14312 	/* The IOCTL status is embedded in the mailbox subheader. */
14313 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14314 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14315 	if (shdr_status || shdr_add_status || rc) {
14316 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14317 				"2501 CQ_CREATE mailbox failed with "
14318 				"status x%x add_status x%x, mbx status x%x\n",
14319 				shdr_status, shdr_add_status, rc);
14320 		status = -ENXIO;
14321 		goto out;
14322 	}
14323 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14324 	if (cq->queue_id == 0xFFFF) {
14325 		status = -ENXIO;
14326 		goto out;
14327 	}
14328 	/* link the cq onto the parent eq child list */
14329 	list_add_tail(&cq->list, &eq->child_list);
14330 	/* Set up completion queue's type and subtype */
14331 	cq->type = type;
14332 	cq->subtype = subtype;
14333 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
14334 	cq->assoc_qid = eq->queue_id;
14335 	cq->host_index = 0;
14336 	cq->hba_index = 0;
14337 	cq->entry_repost = LPFC_CQ_REPOST;
14338 
14339 out:
14340 	mempool_free(mbox, phba->mbox_mem_pool);
14341 	return status;
14342 }
14343 
14344 /**
14345  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
14346  * @phba: HBA structure that indicates port to create a queue on.
14347  * @cqp: The queue structure array to use to create the completion queues.
14348  * @eqp: The event queue array to bind these completion queues to.
14349  *
14350  * This function creates a set of  completion queue, s to support MRQ
14351  * as detailed in @cqp, on a port,
14352  * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
14353  *
14354  * The @phba struct is used to send mailbox command to HBA. The @cq struct
14355  * is used to get the entry count and entry size that are necessary to
14356  * determine the number of pages to allocate and use for this queue. The @eq
14357  * is used to indicate which event queue to bind this completion queue to. This
14358  * function will send the CREATE_CQ_SET mailbox command to the HBA to setup the
14359  * completion queue. This function is asynchronous and will wait for the mailbox
14360  * command to finish before continuing.
14361  *
14362  * On success this function will return a zero. If unable to allocate enough
14363  * memory this function will return -ENOMEM. If the queue create mailbox command
14364  * fails this function will return -ENXIO.
14365  **/
14366 int
14367 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
14368 		   struct lpfc_queue **eqp, uint32_t type, uint32_t subtype)
14369 {
14370 	struct lpfc_queue *cq;
14371 	struct lpfc_queue *eq;
14372 	struct lpfc_mbx_cq_create_set *cq_set;
14373 	struct lpfc_dmabuf *dmabuf;
14374 	LPFC_MBOXQ_t *mbox;
14375 	int rc, length, alloclen, status = 0;
14376 	int cnt, idx, numcq, page_idx = 0;
14377 	uint32_t shdr_status, shdr_add_status;
14378 	union lpfc_sli4_cfg_shdr *shdr;
14379 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14380 
14381 	/* sanity check on queue memory */
14382 	numcq = phba->cfg_nvmet_mrq;
14383 	if (!cqp || !eqp || !numcq)
14384 		return -ENODEV;
14385 	if (!phba->sli4_hba.pc_sli4_params.supported)
14386 		hw_page_size = SLI4_PAGE_SIZE;
14387 
14388 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14389 	if (!mbox)
14390 		return -ENOMEM;
14391 
14392 	length = sizeof(struct lpfc_mbx_cq_create_set);
14393 	length += ((numcq * cqp[0]->page_count) *
14394 		   sizeof(struct dma_address));
14395 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14396 			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
14397 			LPFC_SLI4_MBX_NEMBED);
14398 	if (alloclen < length) {
14399 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14400 				"3098 Allocated DMA memory size (%d) is "
14401 				"less than the requested DMA memory size "
14402 				"(%d)\n", alloclen, length);
14403 		status = -ENOMEM;
14404 		goto out;
14405 	}
14406 	cq_set = mbox->sge_array->addr[0];
14407 	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
14408 	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
14409 
14410 	for (idx = 0; idx < numcq; idx++) {
14411 		cq = cqp[idx];
14412 		eq = eqp[idx];
14413 		if (!cq || !eq) {
14414 			status = -ENOMEM;
14415 			goto out;
14416 		}
14417 
14418 		switch (idx) {
14419 		case 0:
14420 			bf_set(lpfc_mbx_cq_create_set_page_size,
14421 			       &cq_set->u.request,
14422 			       (hw_page_size / SLI4_PAGE_SIZE));
14423 			bf_set(lpfc_mbx_cq_create_set_num_pages,
14424 			       &cq_set->u.request, cq->page_count);
14425 			bf_set(lpfc_mbx_cq_create_set_evt,
14426 			       &cq_set->u.request, 1);
14427 			bf_set(lpfc_mbx_cq_create_set_valid,
14428 			       &cq_set->u.request, 1);
14429 			bf_set(lpfc_mbx_cq_create_set_cqe_size,
14430 			       &cq_set->u.request, 0);
14431 			bf_set(lpfc_mbx_cq_create_set_num_cq,
14432 			       &cq_set->u.request, numcq);
14433 			switch (cq->entry_count) {
14434 			default:
14435 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14436 						"3118 Bad CQ count. (%d)\n",
14437 						cq->entry_count);
14438 				if (cq->entry_count < 256) {
14439 					status = -EINVAL;
14440 					goto out;
14441 				}
14442 				/* otherwise default to smallest (drop thru) */
14443 			case 256:
14444 				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14445 				       &cq_set->u.request, LPFC_CQ_CNT_256);
14446 				break;
14447 			case 512:
14448 				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14449 				       &cq_set->u.request, LPFC_CQ_CNT_512);
14450 				break;
14451 			case 1024:
14452 				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
14453 				       &cq_set->u.request, LPFC_CQ_CNT_1024);
14454 				break;
14455 			}
14456 			bf_set(lpfc_mbx_cq_create_set_eq_id0,
14457 			       &cq_set->u.request, eq->queue_id);
14458 			break;
14459 		case 1:
14460 			bf_set(lpfc_mbx_cq_create_set_eq_id1,
14461 			       &cq_set->u.request, eq->queue_id);
14462 			break;
14463 		case 2:
14464 			bf_set(lpfc_mbx_cq_create_set_eq_id2,
14465 			       &cq_set->u.request, eq->queue_id);
14466 			break;
14467 		case 3:
14468 			bf_set(lpfc_mbx_cq_create_set_eq_id3,
14469 			       &cq_set->u.request, eq->queue_id);
14470 			break;
14471 		case 4:
14472 			bf_set(lpfc_mbx_cq_create_set_eq_id4,
14473 			       &cq_set->u.request, eq->queue_id);
14474 			break;
14475 		case 5:
14476 			bf_set(lpfc_mbx_cq_create_set_eq_id5,
14477 			       &cq_set->u.request, eq->queue_id);
14478 			break;
14479 		case 6:
14480 			bf_set(lpfc_mbx_cq_create_set_eq_id6,
14481 			       &cq_set->u.request, eq->queue_id);
14482 			break;
14483 		case 7:
14484 			bf_set(lpfc_mbx_cq_create_set_eq_id7,
14485 			       &cq_set->u.request, eq->queue_id);
14486 			break;
14487 		case 8:
14488 			bf_set(lpfc_mbx_cq_create_set_eq_id8,
14489 			       &cq_set->u.request, eq->queue_id);
14490 			break;
14491 		case 9:
14492 			bf_set(lpfc_mbx_cq_create_set_eq_id9,
14493 			       &cq_set->u.request, eq->queue_id);
14494 			break;
14495 		case 10:
14496 			bf_set(lpfc_mbx_cq_create_set_eq_id10,
14497 			       &cq_set->u.request, eq->queue_id);
14498 			break;
14499 		case 11:
14500 			bf_set(lpfc_mbx_cq_create_set_eq_id11,
14501 			       &cq_set->u.request, eq->queue_id);
14502 			break;
14503 		case 12:
14504 			bf_set(lpfc_mbx_cq_create_set_eq_id12,
14505 			       &cq_set->u.request, eq->queue_id);
14506 			break;
14507 		case 13:
14508 			bf_set(lpfc_mbx_cq_create_set_eq_id13,
14509 			       &cq_set->u.request, eq->queue_id);
14510 			break;
14511 		case 14:
14512 			bf_set(lpfc_mbx_cq_create_set_eq_id14,
14513 			       &cq_set->u.request, eq->queue_id);
14514 			break;
14515 		case 15:
14516 			bf_set(lpfc_mbx_cq_create_set_eq_id15,
14517 			       &cq_set->u.request, eq->queue_id);
14518 			break;
14519 		}
14520 
14521 		/* link the cq onto the parent eq child list */
14522 		list_add_tail(&cq->list, &eq->child_list);
14523 		/* Set up completion queue's type and subtype */
14524 		cq->type = type;
14525 		cq->subtype = subtype;
14526 		cq->assoc_qid = eq->queue_id;
14527 		cq->host_index = 0;
14528 		cq->hba_index = 0;
14529 		cq->entry_repost = LPFC_CQ_REPOST;
14530 
14531 		rc = 0;
14532 		list_for_each_entry(dmabuf, &cq->page_list, list) {
14533 			memset(dmabuf->virt, 0, hw_page_size);
14534 			cnt = page_idx + dmabuf->buffer_tag;
14535 			cq_set->u.request.page[cnt].addr_lo =
14536 					putPaddrLow(dmabuf->phys);
14537 			cq_set->u.request.page[cnt].addr_hi =
14538 					putPaddrHigh(dmabuf->phys);
14539 			rc++;
14540 		}
14541 		page_idx += rc;
14542 	}
14543 
14544 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14545 
14546 	/* The IOCTL status is embedded in the mailbox subheader. */
14547 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14548 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14549 	if (shdr_status || shdr_add_status || rc) {
14550 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14551 				"3119 CQ_CREATE_SET mailbox failed with "
14552 				"status x%x add_status x%x, mbx status x%x\n",
14553 				shdr_status, shdr_add_status, rc);
14554 		status = -ENXIO;
14555 		goto out;
14556 	}
14557 	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
14558 	if (rc == 0xFFFF) {
14559 		status = -ENXIO;
14560 		goto out;
14561 	}
14562 
14563 	for (idx = 0; idx < numcq; idx++) {
14564 		cq = cqp[idx];
14565 		cq->queue_id = rc + idx;
14566 	}
14567 
14568 out:
14569 	lpfc_sli4_mbox_cmd_free(phba, mbox);
14570 	return status;
14571 }
14572 
14573 /**
14574  * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
14575  * @phba: HBA structure that indicates port to create a queue on.
14576  * @mq: The queue structure to use to create the mailbox queue.
14577  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
14578  * @cq: The completion queue to associate with this cq.
14579  *
14580  * This function provides failback (fb) functionality when the
14581  * mq_create_ext fails on older FW generations.  It's purpose is identical
14582  * to mq_create_ext otherwise.
14583  *
14584  * This routine cannot fail as all attributes were previously accessed and
14585  * initialized in mq_create_ext.
14586  **/
14587 static void
14588 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
14589 		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
14590 {
14591 	struct lpfc_mbx_mq_create *mq_create;
14592 	struct lpfc_dmabuf *dmabuf;
14593 	int length;
14594 
14595 	length = (sizeof(struct lpfc_mbx_mq_create) -
14596 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14597 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14598 			 LPFC_MBOX_OPCODE_MQ_CREATE,
14599 			 length, LPFC_SLI4_MBX_EMBED);
14600 	mq_create = &mbox->u.mqe.un.mq_create;
14601 	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
14602 	       mq->page_count);
14603 	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
14604 	       cq->queue_id);
14605 	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
14606 	switch (mq->entry_count) {
14607 	case 16:
14608 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14609 		       LPFC_MQ_RING_SIZE_16);
14610 		break;
14611 	case 32:
14612 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14613 		       LPFC_MQ_RING_SIZE_32);
14614 		break;
14615 	case 64:
14616 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14617 		       LPFC_MQ_RING_SIZE_64);
14618 		break;
14619 	case 128:
14620 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
14621 		       LPFC_MQ_RING_SIZE_128);
14622 		break;
14623 	}
14624 	list_for_each_entry(dmabuf, &mq->page_list, list) {
14625 		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
14626 			putPaddrLow(dmabuf->phys);
14627 		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
14628 			putPaddrHigh(dmabuf->phys);
14629 	}
14630 }
14631 
14632 /**
14633  * lpfc_mq_create - Create a mailbox Queue on the HBA
14634  * @phba: HBA structure that indicates port to create a queue on.
14635  * @mq: The queue structure to use to create the mailbox queue.
14636  * @cq: The completion queue to associate with this cq.
14637  * @subtype: The queue's subtype.
14638  *
14639  * This function creates a mailbox queue, as detailed in @mq, on a port,
14640  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
14641  *
14642  * The @phba struct is used to send mailbox command to HBA. The @cq struct
14643  * is used to get the entry count and entry size that are necessary to
14644  * determine the number of pages to allocate and use for this queue. This
14645  * function will send the MQ_CREATE mailbox command to the HBA to setup the
14646  * mailbox queue. This function is asynchronous and will wait for the mailbox
14647  * command to finish before continuing.
14648  *
14649  * On success this function will return a zero. If unable to allocate enough
14650  * memory this function will return -ENOMEM. If the queue create mailbox command
14651  * fails this function will return -ENXIO.
14652  **/
14653 int32_t
14654 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
14655 	       struct lpfc_queue *cq, uint32_t subtype)
14656 {
14657 	struct lpfc_mbx_mq_create *mq_create;
14658 	struct lpfc_mbx_mq_create_ext *mq_create_ext;
14659 	struct lpfc_dmabuf *dmabuf;
14660 	LPFC_MBOXQ_t *mbox;
14661 	int rc, length, status = 0;
14662 	uint32_t shdr_status, shdr_add_status;
14663 	union lpfc_sli4_cfg_shdr *shdr;
14664 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14665 
14666 	/* sanity check on queue memory */
14667 	if (!mq || !cq)
14668 		return -ENODEV;
14669 	if (!phba->sli4_hba.pc_sli4_params.supported)
14670 		hw_page_size = SLI4_PAGE_SIZE;
14671 
14672 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14673 	if (!mbox)
14674 		return -ENOMEM;
14675 	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
14676 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14677 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
14678 			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
14679 			 length, LPFC_SLI4_MBX_EMBED);
14680 
14681 	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
14682 	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
14683 	bf_set(lpfc_mbx_mq_create_ext_num_pages,
14684 	       &mq_create_ext->u.request, mq->page_count);
14685 	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
14686 	       &mq_create_ext->u.request, 1);
14687 	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
14688 	       &mq_create_ext->u.request, 1);
14689 	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
14690 	       &mq_create_ext->u.request, 1);
14691 	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
14692 	       &mq_create_ext->u.request, 1);
14693 	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
14694 	       &mq_create_ext->u.request, 1);
14695 	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
14696 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
14697 	       phba->sli4_hba.pc_sli4_params.mqv);
14698 	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
14699 		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
14700 		       cq->queue_id);
14701 	else
14702 		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
14703 		       cq->queue_id);
14704 	switch (mq->entry_count) {
14705 	default:
14706 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
14707 				"0362 Unsupported MQ count. (%d)\n",
14708 				mq->entry_count);
14709 		if (mq->entry_count < 16) {
14710 			status = -EINVAL;
14711 			goto out;
14712 		}
14713 		/* otherwise default to smallest count (drop through) */
14714 	case 16:
14715 		bf_set(lpfc_mq_context_ring_size,
14716 		       &mq_create_ext->u.request.context,
14717 		       LPFC_MQ_RING_SIZE_16);
14718 		break;
14719 	case 32:
14720 		bf_set(lpfc_mq_context_ring_size,
14721 		       &mq_create_ext->u.request.context,
14722 		       LPFC_MQ_RING_SIZE_32);
14723 		break;
14724 	case 64:
14725 		bf_set(lpfc_mq_context_ring_size,
14726 		       &mq_create_ext->u.request.context,
14727 		       LPFC_MQ_RING_SIZE_64);
14728 		break;
14729 	case 128:
14730 		bf_set(lpfc_mq_context_ring_size,
14731 		       &mq_create_ext->u.request.context,
14732 		       LPFC_MQ_RING_SIZE_128);
14733 		break;
14734 	}
14735 	list_for_each_entry(dmabuf, &mq->page_list, list) {
14736 		memset(dmabuf->virt, 0, hw_page_size);
14737 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
14738 					putPaddrLow(dmabuf->phys);
14739 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
14740 					putPaddrHigh(dmabuf->phys);
14741 	}
14742 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14743 	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14744 			      &mq_create_ext->u.response);
14745 	if (rc != MBX_SUCCESS) {
14746 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14747 				"2795 MQ_CREATE_EXT failed with "
14748 				"status x%x. Failback to MQ_CREATE.\n",
14749 				rc);
14750 		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
14751 		mq_create = &mbox->u.mqe.un.mq_create;
14752 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14753 		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
14754 		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
14755 				      &mq_create->u.response);
14756 	}
14757 
14758 	/* The IOCTL status is embedded in the mailbox subheader. */
14759 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14760 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14761 	if (shdr_status || shdr_add_status || rc) {
14762 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14763 				"2502 MQ_CREATE mailbox failed with "
14764 				"status x%x add_status x%x, mbx status x%x\n",
14765 				shdr_status, shdr_add_status, rc);
14766 		status = -ENXIO;
14767 		goto out;
14768 	}
14769 	if (mq->queue_id == 0xFFFF) {
14770 		status = -ENXIO;
14771 		goto out;
14772 	}
14773 	mq->type = LPFC_MQ;
14774 	mq->assoc_qid = cq->queue_id;
14775 	mq->subtype = subtype;
14776 	mq->host_index = 0;
14777 	mq->hba_index = 0;
14778 	mq->entry_repost = LPFC_MQ_REPOST;
14779 
14780 	/* link the mq onto the parent cq child list */
14781 	list_add_tail(&mq->list, &cq->child_list);
14782 out:
14783 	mempool_free(mbox, phba->mbox_mem_pool);
14784 	return status;
14785 }
14786 
14787 /**
14788  * lpfc_wq_create - Create a Work Queue on the HBA
14789  * @phba: HBA structure that indicates port to create a queue on.
14790  * @wq: The queue structure to use to create the work queue.
14791  * @cq: The completion queue to bind this work queue to.
14792  * @subtype: The subtype of the work queue indicating its functionality.
14793  *
14794  * This function creates a work queue, as detailed in @wq, on a port, described
14795  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
14796  *
14797  * The @phba struct is used to send mailbox command to HBA. The @wq struct
14798  * is used to get the entry count and entry size that are necessary to
14799  * determine the number of pages to allocate and use for this queue. The @cq
14800  * is used to indicate which completion queue to bind this work queue to. This
14801  * function will send the WQ_CREATE mailbox command to the HBA to setup the
14802  * work queue. This function is asynchronous and will wait for the mailbox
14803  * command to finish before continuing.
14804  *
14805  * On success this function will return a zero. If unable to allocate enough
14806  * memory this function will return -ENOMEM. If the queue create mailbox command
14807  * fails this function will return -ENXIO.
14808  **/
14809 int
14810 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
14811 	       struct lpfc_queue *cq, uint32_t subtype)
14812 {
14813 	struct lpfc_mbx_wq_create *wq_create;
14814 	struct lpfc_dmabuf *dmabuf;
14815 	LPFC_MBOXQ_t *mbox;
14816 	int rc, length, status = 0;
14817 	uint32_t shdr_status, shdr_add_status;
14818 	union lpfc_sli4_cfg_shdr *shdr;
14819 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
14820 	struct dma_address *page;
14821 	void __iomem *bar_memmap_p;
14822 	uint32_t db_offset;
14823 	uint16_t pci_barset;
14824 
14825 	/* sanity check on queue memory */
14826 	if (!wq || !cq)
14827 		return -ENODEV;
14828 	if (!phba->sli4_hba.pc_sli4_params.supported)
14829 		hw_page_size = SLI4_PAGE_SIZE;
14830 
14831 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
14832 	if (!mbox)
14833 		return -ENOMEM;
14834 	length = (sizeof(struct lpfc_mbx_wq_create) -
14835 		  sizeof(struct lpfc_sli4_cfg_mhdr));
14836 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
14837 			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
14838 			 length, LPFC_SLI4_MBX_EMBED);
14839 	wq_create = &mbox->u.mqe.un.wq_create;
14840 	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
14841 	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
14842 		    wq->page_count);
14843 	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
14844 		    cq->queue_id);
14845 
14846 	/* wqv is the earliest version supported, NOT the latest */
14847 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
14848 	       phba->sli4_hba.pc_sli4_params.wqv);
14849 
14850 	switch (phba->sli4_hba.pc_sli4_params.wqv) {
14851 	case LPFC_Q_CREATE_VERSION_0:
14852 		switch (wq->entry_size) {
14853 		default:
14854 		case 64:
14855 			/* Nothing to do, version 0 ONLY supports 64 byte */
14856 			page = wq_create->u.request.page;
14857 			break;
14858 		case 128:
14859 			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14860 			    LPFC_WQ_SZ128_SUPPORT)) {
14861 				status = -ERANGE;
14862 				goto out;
14863 			}
14864 			/* If we get here the HBA MUST also support V1 and
14865 			 * we MUST use it
14866 			 */
14867 			bf_set(lpfc_mbox_hdr_version, &shdr->request,
14868 			       LPFC_Q_CREATE_VERSION_1);
14869 
14870 			bf_set(lpfc_mbx_wq_create_wqe_count,
14871 			       &wq_create->u.request_1, wq->entry_count);
14872 			bf_set(lpfc_mbx_wq_create_wqe_size,
14873 			       &wq_create->u.request_1,
14874 			       LPFC_WQ_WQE_SIZE_128);
14875 			bf_set(lpfc_mbx_wq_create_page_size,
14876 			       &wq_create->u.request_1,
14877 			       LPFC_WQ_PAGE_SIZE_4096);
14878 			page = wq_create->u.request_1.page;
14879 			break;
14880 		}
14881 		break;
14882 	case LPFC_Q_CREATE_VERSION_1:
14883 		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
14884 		       wq->entry_count);
14885 		bf_set(lpfc_mbox_hdr_version, &shdr->request,
14886 		       LPFC_Q_CREATE_VERSION_1);
14887 
14888 		switch (wq->entry_size) {
14889 		default:
14890 		case 64:
14891 			bf_set(lpfc_mbx_wq_create_wqe_size,
14892 			       &wq_create->u.request_1,
14893 			       LPFC_WQ_WQE_SIZE_64);
14894 			break;
14895 		case 128:
14896 			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
14897 				LPFC_WQ_SZ128_SUPPORT)) {
14898 				status = -ERANGE;
14899 				goto out;
14900 			}
14901 			bf_set(lpfc_mbx_wq_create_wqe_size,
14902 			       &wq_create->u.request_1,
14903 			       LPFC_WQ_WQE_SIZE_128);
14904 			break;
14905 		}
14906 		bf_set(lpfc_mbx_wq_create_page_size,
14907 		       &wq_create->u.request_1,
14908 		       LPFC_WQ_PAGE_SIZE_4096);
14909 		page = wq_create->u.request_1.page;
14910 		break;
14911 	default:
14912 		status = -ERANGE;
14913 		goto out;
14914 	}
14915 
14916 	list_for_each_entry(dmabuf, &wq->page_list, list) {
14917 		memset(dmabuf->virt, 0, hw_page_size);
14918 		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
14919 		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
14920 	}
14921 
14922 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
14923 		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
14924 
14925 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
14926 	/* The IOCTL status is embedded in the mailbox subheader. */
14927 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
14928 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
14929 	if (shdr_status || shdr_add_status || rc) {
14930 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14931 				"2503 WQ_CREATE mailbox failed with "
14932 				"status x%x add_status x%x, mbx status x%x\n",
14933 				shdr_status, shdr_add_status, rc);
14934 		status = -ENXIO;
14935 		goto out;
14936 	}
14937 	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
14938 	if (wq->queue_id == 0xFFFF) {
14939 		status = -ENXIO;
14940 		goto out;
14941 	}
14942 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
14943 		wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
14944 				       &wq_create->u.response);
14945 		if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
14946 		    (wq->db_format != LPFC_DB_RING_FORMAT)) {
14947 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14948 					"3265 WQ[%d] doorbell format not "
14949 					"supported: x%x\n", wq->queue_id,
14950 					wq->db_format);
14951 			status = -EINVAL;
14952 			goto out;
14953 		}
14954 		pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
14955 				    &wq_create->u.response);
14956 		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
14957 		if (!bar_memmap_p) {
14958 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14959 					"3263 WQ[%d] failed to memmap pci "
14960 					"barset:x%x\n", wq->queue_id,
14961 					pci_barset);
14962 			status = -ENOMEM;
14963 			goto out;
14964 		}
14965 		db_offset = wq_create->u.response.doorbell_offset;
14966 		if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
14967 		    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
14968 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14969 					"3252 WQ[%d] doorbell offset not "
14970 					"supported: x%x\n", wq->queue_id,
14971 					db_offset);
14972 			status = -EINVAL;
14973 			goto out;
14974 		}
14975 		wq->db_regaddr = bar_memmap_p + db_offset;
14976 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14977 				"3264 WQ[%d]: barset:x%x, offset:x%x, "
14978 				"format:x%x\n", wq->queue_id, pci_barset,
14979 				db_offset, wq->db_format);
14980 	} else {
14981 		wq->db_format = LPFC_DB_LIST_FORMAT;
14982 		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
14983 	}
14984 	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
14985 	if (wq->pring == NULL) {
14986 		status = -ENOMEM;
14987 		goto out;
14988 	}
14989 	wq->type = LPFC_WQ;
14990 	wq->assoc_qid = cq->queue_id;
14991 	wq->subtype = subtype;
14992 	wq->host_index = 0;
14993 	wq->hba_index = 0;
14994 	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
14995 
14996 	/* link the wq onto the parent cq child list */
14997 	list_add_tail(&wq->list, &cq->child_list);
14998 out:
14999 	mempool_free(mbox, phba->mbox_mem_pool);
15000 	return status;
15001 }
15002 
15003 /**
15004  * lpfc_rq_create - Create a Receive Queue on the HBA
15005  * @phba: HBA structure that indicates port to create a queue on.
15006  * @hrq: The queue structure to use to create the header receive queue.
15007  * @drq: The queue structure to use to create the data receive queue.
15008  * @cq: The completion queue to bind this work queue to.
15009  *
15010  * This function creates a receive buffer queue pair , as detailed in @hrq and
15011  * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15012  * to the HBA.
15013  *
15014  * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
15015  * struct is used to get the entry count that is necessary to determine the
15016  * number of pages to use for this queue. The @cq is used to indicate which
15017  * completion queue to bind received buffers that are posted to these queues to.
15018  * This function will send the RQ_CREATE mailbox command to the HBA to setup the
15019  * receive queue pair. This function is asynchronous and will wait for the
15020  * mailbox command to finish before continuing.
15021  *
15022  * On success this function will return a zero. If unable to allocate enough
15023  * memory this function will return -ENOMEM. If the queue create mailbox command
15024  * fails this function will return -ENXIO.
15025  **/
15026 int
15027 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15028 	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
15029 {
15030 	struct lpfc_mbx_rq_create *rq_create;
15031 	struct lpfc_dmabuf *dmabuf;
15032 	LPFC_MBOXQ_t *mbox;
15033 	int rc, length, status = 0;
15034 	uint32_t shdr_status, shdr_add_status;
15035 	union lpfc_sli4_cfg_shdr *shdr;
15036 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15037 	void __iomem *bar_memmap_p;
15038 	uint32_t db_offset;
15039 	uint16_t pci_barset;
15040 
15041 	/* sanity check on queue memory */
15042 	if (!hrq || !drq || !cq)
15043 		return -ENODEV;
15044 	if (!phba->sli4_hba.pc_sli4_params.supported)
15045 		hw_page_size = SLI4_PAGE_SIZE;
15046 
15047 	if (hrq->entry_count != drq->entry_count)
15048 		return -EINVAL;
15049 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15050 	if (!mbox)
15051 		return -ENOMEM;
15052 	length = (sizeof(struct lpfc_mbx_rq_create) -
15053 		  sizeof(struct lpfc_sli4_cfg_mhdr));
15054 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15055 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15056 			 length, LPFC_SLI4_MBX_EMBED);
15057 	rq_create = &mbox->u.mqe.un.rq_create;
15058 	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15059 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
15060 	       phba->sli4_hba.pc_sli4_params.rqv);
15061 	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15062 		bf_set(lpfc_rq_context_rqe_count_1,
15063 		       &rq_create->u.request.context,
15064 		       hrq->entry_count);
15065 		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
15066 		bf_set(lpfc_rq_context_rqe_size,
15067 		       &rq_create->u.request.context,
15068 		       LPFC_RQE_SIZE_8);
15069 		bf_set(lpfc_rq_context_page_size,
15070 		       &rq_create->u.request.context,
15071 		       LPFC_RQ_PAGE_SIZE_4096);
15072 	} else {
15073 		switch (hrq->entry_count) {
15074 		default:
15075 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15076 					"2535 Unsupported RQ count. (%d)\n",
15077 					hrq->entry_count);
15078 			if (hrq->entry_count < 512) {
15079 				status = -EINVAL;
15080 				goto out;
15081 			}
15082 			/* otherwise default to smallest count (drop through) */
15083 		case 512:
15084 			bf_set(lpfc_rq_context_rqe_count,
15085 			       &rq_create->u.request.context,
15086 			       LPFC_RQ_RING_SIZE_512);
15087 			break;
15088 		case 1024:
15089 			bf_set(lpfc_rq_context_rqe_count,
15090 			       &rq_create->u.request.context,
15091 			       LPFC_RQ_RING_SIZE_1024);
15092 			break;
15093 		case 2048:
15094 			bf_set(lpfc_rq_context_rqe_count,
15095 			       &rq_create->u.request.context,
15096 			       LPFC_RQ_RING_SIZE_2048);
15097 			break;
15098 		case 4096:
15099 			bf_set(lpfc_rq_context_rqe_count,
15100 			       &rq_create->u.request.context,
15101 			       LPFC_RQ_RING_SIZE_4096);
15102 			break;
15103 		}
15104 		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
15105 		       LPFC_HDR_BUF_SIZE);
15106 	}
15107 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15108 	       cq->queue_id);
15109 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15110 	       hrq->page_count);
15111 	list_for_each_entry(dmabuf, &hrq->page_list, list) {
15112 		memset(dmabuf->virt, 0, hw_page_size);
15113 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15114 					putPaddrLow(dmabuf->phys);
15115 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15116 					putPaddrHigh(dmabuf->phys);
15117 	}
15118 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15119 		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15120 
15121 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15122 	/* The IOCTL status is embedded in the mailbox subheader. */
15123 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15124 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15125 	if (shdr_status || shdr_add_status || rc) {
15126 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15127 				"2504 RQ_CREATE mailbox failed with "
15128 				"status x%x add_status x%x, mbx status x%x\n",
15129 				shdr_status, shdr_add_status, rc);
15130 		status = -ENXIO;
15131 		goto out;
15132 	}
15133 	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15134 	if (hrq->queue_id == 0xFFFF) {
15135 		status = -ENXIO;
15136 		goto out;
15137 	}
15138 
15139 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
15140 		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
15141 					&rq_create->u.response);
15142 		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
15143 		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
15144 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15145 					"3262 RQ [%d] doorbell format not "
15146 					"supported: x%x\n", hrq->queue_id,
15147 					hrq->db_format);
15148 			status = -EINVAL;
15149 			goto out;
15150 		}
15151 
15152 		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
15153 				    &rq_create->u.response);
15154 		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
15155 		if (!bar_memmap_p) {
15156 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15157 					"3269 RQ[%d] failed to memmap pci "
15158 					"barset:x%x\n", hrq->queue_id,
15159 					pci_barset);
15160 			status = -ENOMEM;
15161 			goto out;
15162 		}
15163 
15164 		db_offset = rq_create->u.response.doorbell_offset;
15165 		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
15166 		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
15167 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15168 					"3270 RQ[%d] doorbell offset not "
15169 					"supported: x%x\n", hrq->queue_id,
15170 					db_offset);
15171 			status = -EINVAL;
15172 			goto out;
15173 		}
15174 		hrq->db_regaddr = bar_memmap_p + db_offset;
15175 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15176 				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
15177 				"format:x%x\n", hrq->queue_id, pci_barset,
15178 				db_offset, hrq->db_format);
15179 	} else {
15180 		hrq->db_format = LPFC_DB_RING_FORMAT;
15181 		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15182 	}
15183 	hrq->type = LPFC_HRQ;
15184 	hrq->assoc_qid = cq->queue_id;
15185 	hrq->subtype = subtype;
15186 	hrq->host_index = 0;
15187 	hrq->hba_index = 0;
15188 	hrq->entry_repost = LPFC_RQ_REPOST;
15189 
15190 	/* now create the data queue */
15191 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15192 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
15193 			 length, LPFC_SLI4_MBX_EMBED);
15194 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
15195 	       phba->sli4_hba.pc_sli4_params.rqv);
15196 	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
15197 		bf_set(lpfc_rq_context_rqe_count_1,
15198 		       &rq_create->u.request.context, hrq->entry_count);
15199 		if (subtype == LPFC_NVMET)
15200 			rq_create->u.request.context.buffer_size =
15201 				LPFC_NVMET_DATA_BUF_SIZE;
15202 		else
15203 			rq_create->u.request.context.buffer_size =
15204 				LPFC_DATA_BUF_SIZE;
15205 		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
15206 		       LPFC_RQE_SIZE_8);
15207 		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
15208 		       (PAGE_SIZE/SLI4_PAGE_SIZE));
15209 	} else {
15210 		switch (drq->entry_count) {
15211 		default:
15212 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15213 					"2536 Unsupported RQ count. (%d)\n",
15214 					drq->entry_count);
15215 			if (drq->entry_count < 512) {
15216 				status = -EINVAL;
15217 				goto out;
15218 			}
15219 			/* otherwise default to smallest count (drop through) */
15220 		case 512:
15221 			bf_set(lpfc_rq_context_rqe_count,
15222 			       &rq_create->u.request.context,
15223 			       LPFC_RQ_RING_SIZE_512);
15224 			break;
15225 		case 1024:
15226 			bf_set(lpfc_rq_context_rqe_count,
15227 			       &rq_create->u.request.context,
15228 			       LPFC_RQ_RING_SIZE_1024);
15229 			break;
15230 		case 2048:
15231 			bf_set(lpfc_rq_context_rqe_count,
15232 			       &rq_create->u.request.context,
15233 			       LPFC_RQ_RING_SIZE_2048);
15234 			break;
15235 		case 4096:
15236 			bf_set(lpfc_rq_context_rqe_count,
15237 			       &rq_create->u.request.context,
15238 			       LPFC_RQ_RING_SIZE_4096);
15239 			break;
15240 		}
15241 		if (subtype == LPFC_NVMET)
15242 			bf_set(lpfc_rq_context_buf_size,
15243 			       &rq_create->u.request.context,
15244 			       LPFC_NVMET_DATA_BUF_SIZE);
15245 		else
15246 			bf_set(lpfc_rq_context_buf_size,
15247 			       &rq_create->u.request.context,
15248 			       LPFC_DATA_BUF_SIZE);
15249 	}
15250 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
15251 	       cq->queue_id);
15252 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
15253 	       drq->page_count);
15254 	list_for_each_entry(dmabuf, &drq->page_list, list) {
15255 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
15256 					putPaddrLow(dmabuf->phys);
15257 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
15258 					putPaddrHigh(dmabuf->phys);
15259 	}
15260 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
15261 		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
15262 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15263 	/* The IOCTL status is embedded in the mailbox subheader. */
15264 	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
15265 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15266 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15267 	if (shdr_status || shdr_add_status || rc) {
15268 		status = -ENXIO;
15269 		goto out;
15270 	}
15271 	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15272 	if (drq->queue_id == 0xFFFF) {
15273 		status = -ENXIO;
15274 		goto out;
15275 	}
15276 	drq->type = LPFC_DRQ;
15277 	drq->assoc_qid = cq->queue_id;
15278 	drq->subtype = subtype;
15279 	drq->host_index = 0;
15280 	drq->hba_index = 0;
15281 	drq->entry_repost = LPFC_RQ_REPOST;
15282 
15283 	/* link the header and data RQs onto the parent cq child list */
15284 	list_add_tail(&hrq->list, &cq->child_list);
15285 	list_add_tail(&drq->list, &cq->child_list);
15286 
15287 out:
15288 	mempool_free(mbox, phba->mbox_mem_pool);
15289 	return status;
15290 }
15291 
15292 /**
15293  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
15294  * @phba: HBA structure that indicates port to create a queue on.
15295  * @hrqp: The queue structure array to use to create the header receive queues.
15296  * @drqp: The queue structure array to use to create the data receive queues.
15297  * @cqp: The completion queue array to bind these receive queues to.
15298  *
15299  * This function creates a receive buffer queue pair , as detailed in @hrq and
15300  * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
15301  * to the HBA.
15302  *
15303  * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
15304  * struct is used to get the entry count that is necessary to determine the
15305  * number of pages to use for this queue. The @cq is used to indicate which
15306  * completion queue to bind received buffers that are posted to these queues to.
15307  * This function will send the RQ_CREATE mailbox command to the HBA to setup the
15308  * receive queue pair. This function is asynchronous and will wait for the
15309  * mailbox command to finish before continuing.
15310  *
15311  * On success this function will return a zero. If unable to allocate enough
15312  * memory this function will return -ENOMEM. If the queue create mailbox command
15313  * fails this function will return -ENXIO.
15314  **/
15315 int
15316 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
15317 		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
15318 		uint32_t subtype)
15319 {
15320 	struct lpfc_queue *hrq, *drq, *cq;
15321 	struct lpfc_mbx_rq_create_v2 *rq_create;
15322 	struct lpfc_dmabuf *dmabuf;
15323 	LPFC_MBOXQ_t *mbox;
15324 	int rc, length, alloclen, status = 0;
15325 	int cnt, idx, numrq, page_idx = 0;
15326 	uint32_t shdr_status, shdr_add_status;
15327 	union lpfc_sli4_cfg_shdr *shdr;
15328 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15329 
15330 	numrq = phba->cfg_nvmet_mrq;
15331 	/* sanity check on array memory */
15332 	if (!hrqp || !drqp || !cqp || !numrq)
15333 		return -ENODEV;
15334 	if (!phba->sli4_hba.pc_sli4_params.supported)
15335 		hw_page_size = SLI4_PAGE_SIZE;
15336 
15337 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15338 	if (!mbox)
15339 		return -ENOMEM;
15340 
15341 	length = sizeof(struct lpfc_mbx_rq_create_v2);
15342 	length += ((2 * numrq * hrqp[0]->page_count) *
15343 		   sizeof(struct dma_address));
15344 
15345 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15346 				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
15347 				    LPFC_SLI4_MBX_NEMBED);
15348 	if (alloclen < length) {
15349 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15350 				"3099 Allocated DMA memory size (%d) is "
15351 				"less than the requested DMA memory size "
15352 				"(%d)\n", alloclen, length);
15353 		status = -ENOMEM;
15354 		goto out;
15355 	}
15356 
15357 
15358 
15359 	rq_create = mbox->sge_array->addr[0];
15360 	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
15361 
15362 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
15363 	cnt = 0;
15364 
15365 	for (idx = 0; idx < numrq; idx++) {
15366 		hrq = hrqp[idx];
15367 		drq = drqp[idx];
15368 		cq  = cqp[idx];
15369 
15370 		/* sanity check on queue memory */
15371 		if (!hrq || !drq || !cq) {
15372 			status = -ENODEV;
15373 			goto out;
15374 		}
15375 
15376 		if (hrq->entry_count != drq->entry_count) {
15377 			status = -EINVAL;
15378 			goto out;
15379 		}
15380 
15381 		if (idx == 0) {
15382 			bf_set(lpfc_mbx_rq_create_num_pages,
15383 			       &rq_create->u.request,
15384 			       hrq->page_count);
15385 			bf_set(lpfc_mbx_rq_create_rq_cnt,
15386 			       &rq_create->u.request, (numrq * 2));
15387 			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
15388 			       1);
15389 			bf_set(lpfc_rq_context_base_cq,
15390 			       &rq_create->u.request.context,
15391 			       cq->queue_id);
15392 			bf_set(lpfc_rq_context_data_size,
15393 			       &rq_create->u.request.context,
15394 			       LPFC_NVMET_DATA_BUF_SIZE);
15395 			bf_set(lpfc_rq_context_hdr_size,
15396 			       &rq_create->u.request.context,
15397 			       LPFC_HDR_BUF_SIZE);
15398 			bf_set(lpfc_rq_context_rqe_count_1,
15399 			       &rq_create->u.request.context,
15400 			       hrq->entry_count);
15401 			bf_set(lpfc_rq_context_rqe_size,
15402 			       &rq_create->u.request.context,
15403 			       LPFC_RQE_SIZE_8);
15404 			bf_set(lpfc_rq_context_page_size,
15405 			       &rq_create->u.request.context,
15406 			       (PAGE_SIZE/SLI4_PAGE_SIZE));
15407 		}
15408 		rc = 0;
15409 		list_for_each_entry(dmabuf, &hrq->page_list, list) {
15410 			memset(dmabuf->virt, 0, hw_page_size);
15411 			cnt = page_idx + dmabuf->buffer_tag;
15412 			rq_create->u.request.page[cnt].addr_lo =
15413 					putPaddrLow(dmabuf->phys);
15414 			rq_create->u.request.page[cnt].addr_hi =
15415 					putPaddrHigh(dmabuf->phys);
15416 			rc++;
15417 		}
15418 		page_idx += rc;
15419 
15420 		rc = 0;
15421 		list_for_each_entry(dmabuf, &drq->page_list, list) {
15422 			memset(dmabuf->virt, 0, hw_page_size);
15423 			cnt = page_idx + dmabuf->buffer_tag;
15424 			rq_create->u.request.page[cnt].addr_lo =
15425 					putPaddrLow(dmabuf->phys);
15426 			rq_create->u.request.page[cnt].addr_hi =
15427 					putPaddrHigh(dmabuf->phys);
15428 			rc++;
15429 		}
15430 		page_idx += rc;
15431 
15432 		hrq->db_format = LPFC_DB_RING_FORMAT;
15433 		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15434 		hrq->type = LPFC_HRQ;
15435 		hrq->assoc_qid = cq->queue_id;
15436 		hrq->subtype = subtype;
15437 		hrq->host_index = 0;
15438 		hrq->hba_index = 0;
15439 		hrq->entry_repost = LPFC_RQ_REPOST;
15440 
15441 		drq->db_format = LPFC_DB_RING_FORMAT;
15442 		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
15443 		drq->type = LPFC_DRQ;
15444 		drq->assoc_qid = cq->queue_id;
15445 		drq->subtype = subtype;
15446 		drq->host_index = 0;
15447 		drq->hba_index = 0;
15448 		drq->entry_repost = LPFC_RQ_REPOST;
15449 
15450 		list_add_tail(&hrq->list, &cq->child_list);
15451 		list_add_tail(&drq->list, &cq->child_list);
15452 	}
15453 
15454 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15455 	/* The IOCTL status is embedded in the mailbox subheader. */
15456 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15457 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15458 	if (shdr_status || shdr_add_status || rc) {
15459 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15460 				"3120 RQ_CREATE mailbox failed with "
15461 				"status x%x add_status x%x, mbx status x%x\n",
15462 				shdr_status, shdr_add_status, rc);
15463 		status = -ENXIO;
15464 		goto out;
15465 	}
15466 	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
15467 	if (rc == 0xFFFF) {
15468 		status = -ENXIO;
15469 		goto out;
15470 	}
15471 
15472 	/* Initialize all RQs with associated queue id */
15473 	for (idx = 0; idx < numrq; idx++) {
15474 		hrq = hrqp[idx];
15475 		hrq->queue_id = rc + (2 * idx);
15476 		drq = drqp[idx];
15477 		drq->queue_id = rc + (2 * idx) + 1;
15478 	}
15479 
15480 out:
15481 	lpfc_sli4_mbox_cmd_free(phba, mbox);
15482 	return status;
15483 }
15484 
15485 /**
15486  * lpfc_eq_destroy - Destroy an event Queue on the HBA
15487  * @eq: The queue structure associated with the queue to destroy.
15488  *
15489  * This function destroys a queue, as detailed in @eq by sending an mailbox
15490  * command, specific to the type of queue, to the HBA.
15491  *
15492  * The @eq struct is used to get the queue ID of the queue to destroy.
15493  *
15494  * On success this function will return a zero. If the queue destroy mailbox
15495  * command fails this function will return -ENXIO.
15496  **/
15497 int
15498 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
15499 {
15500 	LPFC_MBOXQ_t *mbox;
15501 	int rc, length, status = 0;
15502 	uint32_t shdr_status, shdr_add_status;
15503 	union lpfc_sli4_cfg_shdr *shdr;
15504 
15505 	/* sanity check on queue memory */
15506 	if (!eq)
15507 		return -ENODEV;
15508 	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
15509 	if (!mbox)
15510 		return -ENOMEM;
15511 	length = (sizeof(struct lpfc_mbx_eq_destroy) -
15512 		  sizeof(struct lpfc_sli4_cfg_mhdr));
15513 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15514 			 LPFC_MBOX_OPCODE_EQ_DESTROY,
15515 			 length, LPFC_SLI4_MBX_EMBED);
15516 	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
15517 	       eq->queue_id);
15518 	mbox->vport = eq->phba->pport;
15519 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15520 
15521 	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
15522 	/* The IOCTL status is embedded in the mailbox subheader. */
15523 	shdr = (union lpfc_sli4_cfg_shdr *)
15524 		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
15525 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15526 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15527 	if (shdr_status || shdr_add_status || rc) {
15528 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15529 				"2505 EQ_DESTROY mailbox failed with "
15530 				"status x%x add_status x%x, mbx status x%x\n",
15531 				shdr_status, shdr_add_status, rc);
15532 		status = -ENXIO;
15533 	}
15534 
15535 	/* Remove eq from any list */
15536 	list_del_init(&eq->list);
15537 	mempool_free(mbox, eq->phba->mbox_mem_pool);
15538 	return status;
15539 }
15540 
15541 /**
15542  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
15543  * @cq: The queue structure associated with the queue to destroy.
15544  *
15545  * This function destroys a queue, as detailed in @cq by sending an mailbox
15546  * command, specific to the type of queue, to the HBA.
15547  *
15548  * The @cq struct is used to get the queue ID of the queue to destroy.
15549  *
15550  * On success this function will return a zero. If the queue destroy mailbox
15551  * command fails this function will return -ENXIO.
15552  **/
15553 int
15554 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
15555 {
15556 	LPFC_MBOXQ_t *mbox;
15557 	int rc, length, status = 0;
15558 	uint32_t shdr_status, shdr_add_status;
15559 	union lpfc_sli4_cfg_shdr *shdr;
15560 
15561 	/* sanity check on queue memory */
15562 	if (!cq)
15563 		return -ENODEV;
15564 	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
15565 	if (!mbox)
15566 		return -ENOMEM;
15567 	length = (sizeof(struct lpfc_mbx_cq_destroy) -
15568 		  sizeof(struct lpfc_sli4_cfg_mhdr));
15569 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15570 			 LPFC_MBOX_OPCODE_CQ_DESTROY,
15571 			 length, LPFC_SLI4_MBX_EMBED);
15572 	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
15573 	       cq->queue_id);
15574 	mbox->vport = cq->phba->pport;
15575 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15576 	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
15577 	/* The IOCTL status is embedded in the mailbox subheader. */
15578 	shdr = (union lpfc_sli4_cfg_shdr *)
15579 		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
15580 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15581 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15582 	if (shdr_status || shdr_add_status || rc) {
15583 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15584 				"2506 CQ_DESTROY mailbox failed with "
15585 				"status x%x add_status x%x, mbx status x%x\n",
15586 				shdr_status, shdr_add_status, rc);
15587 		status = -ENXIO;
15588 	}
15589 	/* Remove cq from any list */
15590 	list_del_init(&cq->list);
15591 	mempool_free(mbox, cq->phba->mbox_mem_pool);
15592 	return status;
15593 }
15594 
15595 /**
15596  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
15597  * @qm: The queue structure associated with the queue to destroy.
15598  *
15599  * This function destroys a queue, as detailed in @mq by sending an mailbox
15600  * command, specific to the type of queue, to the HBA.
15601  *
15602  * The @mq struct is used to get the queue ID of the queue to destroy.
15603  *
15604  * On success this function will return a zero. If the queue destroy mailbox
15605  * command fails this function will return -ENXIO.
15606  **/
15607 int
15608 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
15609 {
15610 	LPFC_MBOXQ_t *mbox;
15611 	int rc, length, status = 0;
15612 	uint32_t shdr_status, shdr_add_status;
15613 	union lpfc_sli4_cfg_shdr *shdr;
15614 
15615 	/* sanity check on queue memory */
15616 	if (!mq)
15617 		return -ENODEV;
15618 	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
15619 	if (!mbox)
15620 		return -ENOMEM;
15621 	length = (sizeof(struct lpfc_mbx_mq_destroy) -
15622 		  sizeof(struct lpfc_sli4_cfg_mhdr));
15623 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15624 			 LPFC_MBOX_OPCODE_MQ_DESTROY,
15625 			 length, LPFC_SLI4_MBX_EMBED);
15626 	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
15627 	       mq->queue_id);
15628 	mbox->vport = mq->phba->pport;
15629 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15630 	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
15631 	/* The IOCTL status is embedded in the mailbox subheader. */
15632 	shdr = (union lpfc_sli4_cfg_shdr *)
15633 		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
15634 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15635 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15636 	if (shdr_status || shdr_add_status || rc) {
15637 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15638 				"2507 MQ_DESTROY mailbox failed with "
15639 				"status x%x add_status x%x, mbx status x%x\n",
15640 				shdr_status, shdr_add_status, rc);
15641 		status = -ENXIO;
15642 	}
15643 	/* Remove mq from any list */
15644 	list_del_init(&mq->list);
15645 	mempool_free(mbox, mq->phba->mbox_mem_pool);
15646 	return status;
15647 }
15648 
15649 /**
15650  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
15651  * @wq: The queue structure associated with the queue to destroy.
15652  *
15653  * This function destroys a queue, as detailed in @wq by sending an mailbox
15654  * command, specific to the type of queue, to the HBA.
15655  *
15656  * The @wq struct is used to get the queue ID of the queue to destroy.
15657  *
15658  * On success this function will return a zero. If the queue destroy mailbox
15659  * command fails this function will return -ENXIO.
15660  **/
15661 int
15662 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
15663 {
15664 	LPFC_MBOXQ_t *mbox;
15665 	int rc, length, status = 0;
15666 	uint32_t shdr_status, shdr_add_status;
15667 	union lpfc_sli4_cfg_shdr *shdr;
15668 
15669 	/* sanity check on queue memory */
15670 	if (!wq)
15671 		return -ENODEV;
15672 	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
15673 	if (!mbox)
15674 		return -ENOMEM;
15675 	length = (sizeof(struct lpfc_mbx_wq_destroy) -
15676 		  sizeof(struct lpfc_sli4_cfg_mhdr));
15677 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15678 			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
15679 			 length, LPFC_SLI4_MBX_EMBED);
15680 	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
15681 	       wq->queue_id);
15682 	mbox->vport = wq->phba->pport;
15683 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15684 	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
15685 	shdr = (union lpfc_sli4_cfg_shdr *)
15686 		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
15687 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15688 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15689 	if (shdr_status || shdr_add_status || rc) {
15690 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15691 				"2508 WQ_DESTROY mailbox failed with "
15692 				"status x%x add_status x%x, mbx status x%x\n",
15693 				shdr_status, shdr_add_status, rc);
15694 		status = -ENXIO;
15695 	}
15696 	/* Remove wq from any list */
15697 	list_del_init(&wq->list);
15698 	kfree(wq->pring);
15699 	wq->pring = NULL;
15700 	mempool_free(mbox, wq->phba->mbox_mem_pool);
15701 	return status;
15702 }
15703 
15704 /**
15705  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
15706  * @rq: The queue structure associated with the queue to destroy.
15707  *
15708  * This function destroys a queue, as detailed in @rq by sending an mailbox
15709  * command, specific to the type of queue, to the HBA.
15710  *
15711  * The @rq struct is used to get the queue ID of the queue to destroy.
15712  *
15713  * On success this function will return a zero. If the queue destroy mailbox
15714  * command fails this function will return -ENXIO.
15715  **/
15716 int
15717 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
15718 		struct lpfc_queue *drq)
15719 {
15720 	LPFC_MBOXQ_t *mbox;
15721 	int rc, length, status = 0;
15722 	uint32_t shdr_status, shdr_add_status;
15723 	union lpfc_sli4_cfg_shdr *shdr;
15724 
15725 	/* sanity check on queue memory */
15726 	if (!hrq || !drq)
15727 		return -ENODEV;
15728 	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
15729 	if (!mbox)
15730 		return -ENOMEM;
15731 	length = (sizeof(struct lpfc_mbx_rq_destroy) -
15732 		  sizeof(struct lpfc_sli4_cfg_mhdr));
15733 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15734 			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
15735 			 length, LPFC_SLI4_MBX_EMBED);
15736 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
15737 	       hrq->queue_id);
15738 	mbox->vport = hrq->phba->pport;
15739 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
15740 	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
15741 	/* The IOCTL status is embedded in the mailbox subheader. */
15742 	shdr = (union lpfc_sli4_cfg_shdr *)
15743 		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
15744 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15745 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15746 	if (shdr_status || shdr_add_status || rc) {
15747 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15748 				"2509 RQ_DESTROY mailbox failed with "
15749 				"status x%x add_status x%x, mbx status x%x\n",
15750 				shdr_status, shdr_add_status, rc);
15751 		if (rc != MBX_TIMEOUT)
15752 			mempool_free(mbox, hrq->phba->mbox_mem_pool);
15753 		return -ENXIO;
15754 	}
15755 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
15756 	       drq->queue_id);
15757 	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
15758 	shdr = (union lpfc_sli4_cfg_shdr *)
15759 		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
15760 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15761 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15762 	if (shdr_status || shdr_add_status || rc) {
15763 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15764 				"2510 RQ_DESTROY mailbox failed with "
15765 				"status x%x add_status x%x, mbx status x%x\n",
15766 				shdr_status, shdr_add_status, rc);
15767 		status = -ENXIO;
15768 	}
15769 	list_del_init(&hrq->list);
15770 	list_del_init(&drq->list);
15771 	mempool_free(mbox, hrq->phba->mbox_mem_pool);
15772 	return status;
15773 }
15774 
15775 /**
15776  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
15777  * @phba: The virtual port for which this call being executed.
15778  * @pdma_phys_addr0: Physical address of the 1st SGL page.
15779  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
15780  * @xritag: the xritag that ties this io to the SGL pages.
15781  *
15782  * This routine will post the sgl pages for the IO that has the xritag
15783  * that is in the iocbq structure. The xritag is assigned during iocbq
15784  * creation and persists for as long as the driver is loaded.
15785  * if the caller has fewer than 256 scatter gather segments to map then
15786  * pdma_phys_addr1 should be 0.
15787  * If the caller needs to map more than 256 scatter gather segment then
15788  * pdma_phys_addr1 should be a valid physical address.
15789  * physical address for SGLs must be 64 byte aligned.
15790  * If you are going to map 2 SGL's then the first one must have 256 entries
15791  * the second sgl can have between 1 and 256 entries.
15792  *
15793  * Return codes:
15794  * 	0 - Success
15795  * 	-ENXIO, -ENOMEM - Failure
15796  **/
15797 int
15798 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
15799 		dma_addr_t pdma_phys_addr0,
15800 		dma_addr_t pdma_phys_addr1,
15801 		uint16_t xritag)
15802 {
15803 	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
15804 	LPFC_MBOXQ_t *mbox;
15805 	int rc;
15806 	uint32_t shdr_status, shdr_add_status;
15807 	uint32_t mbox_tmo;
15808 	union lpfc_sli4_cfg_shdr *shdr;
15809 
15810 	if (xritag == NO_XRI) {
15811 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
15812 				"0364 Invalid param:\n");
15813 		return -EINVAL;
15814 	}
15815 
15816 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15817 	if (!mbox)
15818 		return -ENOMEM;
15819 
15820 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15821 			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
15822 			sizeof(struct lpfc_mbx_post_sgl_pages) -
15823 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
15824 
15825 	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
15826 				&mbox->u.mqe.un.post_sgl_pages;
15827 	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
15828 	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
15829 
15830 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
15831 				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
15832 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
15833 				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
15834 
15835 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
15836 				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
15837 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
15838 				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
15839 	if (!phba->sli4_hba.intr_enable)
15840 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
15841 	else {
15842 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
15843 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
15844 	}
15845 	/* The IOCTL status is embedded in the mailbox subheader. */
15846 	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
15847 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
15848 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
15849 	if (rc != MBX_TIMEOUT)
15850 		mempool_free(mbox, phba->mbox_mem_pool);
15851 	if (shdr_status || shdr_add_status || rc) {
15852 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15853 				"2511 POST_SGL mailbox failed with "
15854 				"status x%x add_status x%x, mbx status x%x\n",
15855 				shdr_status, shdr_add_status, rc);
15856 	}
15857 	return 0;
15858 }
15859 
15860 /**
15861  * lpfc_sli4_alloc_xri - Get an available rpi in the device's range
15862  * @phba: pointer to lpfc hba data structure.
15863  *
15864  * This routine is invoked to post rpi header templates to the
15865  * HBA consistent with the SLI-4 interface spec.  This routine
15866  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
15867  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
15868  *
15869  * Returns
15870  *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
15871  *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
15872  **/
15873 static uint16_t
15874 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
15875 {
15876 	unsigned long xri;
15877 
15878 	/*
15879 	 * Fetch the next logical xri.  Because this index is logical,
15880 	 * the driver starts at 0 each time.
15881 	 */
15882 	spin_lock_irq(&phba->hbalock);
15883 	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
15884 				 phba->sli4_hba.max_cfg_param.max_xri, 0);
15885 	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
15886 		spin_unlock_irq(&phba->hbalock);
15887 		return NO_XRI;
15888 	} else {
15889 		set_bit(xri, phba->sli4_hba.xri_bmask);
15890 		phba->sli4_hba.max_cfg_param.xri_used++;
15891 	}
15892 	spin_unlock_irq(&phba->hbalock);
15893 	return xri;
15894 }
15895 
15896 /**
15897  * lpfc_sli4_free_xri - Release an xri for reuse.
15898  * @phba: pointer to lpfc hba data structure.
15899  *
15900  * This routine is invoked to release an xri to the pool of
15901  * available rpis maintained by the driver.
15902  **/
15903 static void
15904 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15905 {
15906 	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
15907 		phba->sli4_hba.max_cfg_param.xri_used--;
15908 	}
15909 }
15910 
15911 /**
15912  * lpfc_sli4_free_xri - Release an xri for reuse.
15913  * @phba: pointer to lpfc hba data structure.
15914  *
15915  * This routine is invoked to release an xri to the pool of
15916  * available rpis maintained by the driver.
15917  **/
15918 void
15919 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
15920 {
15921 	spin_lock_irq(&phba->hbalock);
15922 	__lpfc_sli4_free_xri(phba, xri);
15923 	spin_unlock_irq(&phba->hbalock);
15924 }
15925 
15926 /**
15927  * lpfc_sli4_next_xritag - Get an xritag for the io
15928  * @phba: Pointer to HBA context object.
15929  *
15930  * This function gets an xritag for the iocb. If there is no unused xritag
15931  * it will return 0xffff.
15932  * The function returns the allocated xritag if successful, else returns zero.
15933  * Zero is not a valid xritag.
15934  * The caller is not required to hold any lock.
15935  **/
15936 uint16_t
15937 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
15938 {
15939 	uint16_t xri_index;
15940 
15941 	xri_index = lpfc_sli4_alloc_xri(phba);
15942 	if (xri_index == NO_XRI)
15943 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15944 				"2004 Failed to allocate XRI.last XRITAG is %d"
15945 				" Max XRI is %d, Used XRI is %d\n",
15946 				xri_index,
15947 				phba->sli4_hba.max_cfg_param.max_xri,
15948 				phba->sli4_hba.max_cfg_param.xri_used);
15949 	return xri_index;
15950 }
15951 
15952 /**
15953  * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
15954  * @phba: pointer to lpfc hba data structure.
15955  * @post_sgl_list: pointer to els sgl entry list.
15956  * @count: number of els sgl entries on the list.
15957  *
15958  * This routine is invoked to post a block of driver's sgl pages to the
15959  * HBA using non-embedded mailbox command. No Lock is held. This routine
15960  * is only called when the driver is loading and after all IO has been
15961  * stopped.
15962  **/
15963 static int
15964 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
15965 			    struct list_head *post_sgl_list,
15966 			    int post_cnt)
15967 {
15968 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
15969 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
15970 	struct sgl_page_pairs *sgl_pg_pairs;
15971 	void *viraddr;
15972 	LPFC_MBOXQ_t *mbox;
15973 	uint32_t reqlen, alloclen, pg_pairs;
15974 	uint32_t mbox_tmo;
15975 	uint16_t xritag_start = 0;
15976 	int rc = 0;
15977 	uint32_t shdr_status, shdr_add_status;
15978 	union lpfc_sli4_cfg_shdr *shdr;
15979 
15980 	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
15981 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
15982 	if (reqlen > SLI4_PAGE_SIZE) {
15983 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15984 				"2559 Block sgl registration required DMA "
15985 				"size (%d) great than a page\n", reqlen);
15986 		return -ENOMEM;
15987 	}
15988 
15989 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15990 	if (!mbox)
15991 		return -ENOMEM;
15992 
15993 	/* Allocate DMA memory and set up the non-embedded mailbox command */
15994 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
15995 			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
15996 			 LPFC_SLI4_MBX_NEMBED);
15997 
15998 	if (alloclen < reqlen) {
15999 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16000 				"0285 Allocated DMA memory size (%d) is "
16001 				"less than the requested DMA memory "
16002 				"size (%d)\n", alloclen, reqlen);
16003 		lpfc_sli4_mbox_cmd_free(phba, mbox);
16004 		return -ENOMEM;
16005 	}
16006 	/* Set up the SGL pages in the non-embedded DMA pages */
16007 	viraddr = mbox->sge_array->addr[0];
16008 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16009 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
16010 
16011 	pg_pairs = 0;
16012 	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
16013 		/* Set up the sge entry */
16014 		sgl_pg_pairs->sgl_pg0_addr_lo =
16015 				cpu_to_le32(putPaddrLow(sglq_entry->phys));
16016 		sgl_pg_pairs->sgl_pg0_addr_hi =
16017 				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
16018 		sgl_pg_pairs->sgl_pg1_addr_lo =
16019 				cpu_to_le32(putPaddrLow(0));
16020 		sgl_pg_pairs->sgl_pg1_addr_hi =
16021 				cpu_to_le32(putPaddrHigh(0));
16022 
16023 		/* Keep the first xritag on the list */
16024 		if (pg_pairs == 0)
16025 			xritag_start = sglq_entry->sli4_xritag;
16026 		sgl_pg_pairs++;
16027 		pg_pairs++;
16028 	}
16029 
16030 	/* Complete initialization and perform endian conversion. */
16031 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16032 	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
16033 	sgl->word0 = cpu_to_le32(sgl->word0);
16034 
16035 	if (!phba->sli4_hba.intr_enable)
16036 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16037 	else {
16038 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16039 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16040 	}
16041 	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16042 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16043 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16044 	if (rc != MBX_TIMEOUT)
16045 		lpfc_sli4_mbox_cmd_free(phba, mbox);
16046 	if (shdr_status || shdr_add_status || rc) {
16047 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16048 				"2513 POST_SGL_BLOCK mailbox command failed "
16049 				"status x%x add_status x%x mbx status x%x\n",
16050 				shdr_status, shdr_add_status, rc);
16051 		rc = -ENXIO;
16052 	}
16053 	return rc;
16054 }
16055 
16056 /**
16057  * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
16058  * @phba: pointer to lpfc hba data structure.
16059  * @sblist: pointer to scsi buffer list.
16060  * @count: number of scsi buffers on the list.
16061  *
16062  * This routine is invoked to post a block of @count scsi sgl pages from a
16063  * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
16064  * No Lock is held.
16065  *
16066  **/
16067 int
16068 lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
16069 			      struct list_head *sblist,
16070 			      int count)
16071 {
16072 	struct lpfc_scsi_buf *psb;
16073 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
16074 	struct sgl_page_pairs *sgl_pg_pairs;
16075 	void *viraddr;
16076 	LPFC_MBOXQ_t *mbox;
16077 	uint32_t reqlen, alloclen, pg_pairs;
16078 	uint32_t mbox_tmo;
16079 	uint16_t xritag_start = 0;
16080 	int rc = 0;
16081 	uint32_t shdr_status, shdr_add_status;
16082 	dma_addr_t pdma_phys_bpl1;
16083 	union lpfc_sli4_cfg_shdr *shdr;
16084 
16085 	/* Calculate the requested length of the dma memory */
16086 	reqlen = count * sizeof(struct sgl_page_pairs) +
16087 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
16088 	if (reqlen > SLI4_PAGE_SIZE) {
16089 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
16090 				"0217 Block sgl registration required DMA "
16091 				"size (%d) great than a page\n", reqlen);
16092 		return -ENOMEM;
16093 	}
16094 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16095 	if (!mbox) {
16096 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16097 				"0283 Failed to allocate mbox cmd memory\n");
16098 		return -ENOMEM;
16099 	}
16100 
16101 	/* Allocate DMA memory and set up the non-embedded mailbox command */
16102 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16103 				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
16104 				LPFC_SLI4_MBX_NEMBED);
16105 
16106 	if (alloclen < reqlen) {
16107 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
16108 				"2561 Allocated DMA memory size (%d) is "
16109 				"less than the requested DMA memory "
16110 				"size (%d)\n", alloclen, reqlen);
16111 		lpfc_sli4_mbox_cmd_free(phba, mbox);
16112 		return -ENOMEM;
16113 	}
16114 
16115 	/* Get the first SGE entry from the non-embedded DMA memory */
16116 	viraddr = mbox->sge_array->addr[0];
16117 
16118 	/* Set up the SGL pages in the non-embedded DMA pages */
16119 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
16120 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
16121 
16122 	pg_pairs = 0;
16123 	list_for_each_entry(psb, sblist, list) {
16124 		/* Set up the sge entry */
16125 		sgl_pg_pairs->sgl_pg0_addr_lo =
16126 			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
16127 		sgl_pg_pairs->sgl_pg0_addr_hi =
16128 			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
16129 		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
16130 			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
16131 		else
16132 			pdma_phys_bpl1 = 0;
16133 		sgl_pg_pairs->sgl_pg1_addr_lo =
16134 			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
16135 		sgl_pg_pairs->sgl_pg1_addr_hi =
16136 			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
16137 		/* Keep the first xritag on the list */
16138 		if (pg_pairs == 0)
16139 			xritag_start = psb->cur_iocbq.sli4_xritag;
16140 		sgl_pg_pairs++;
16141 		pg_pairs++;
16142 	}
16143 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
16144 	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
16145 	/* Perform endian conversion if necessary */
16146 	sgl->word0 = cpu_to_le32(sgl->word0);
16147 
16148 	if (!phba->sli4_hba.intr_enable)
16149 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16150 	else {
16151 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
16152 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
16153 	}
16154 	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
16155 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16156 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16157 	if (rc != MBX_TIMEOUT)
16158 		lpfc_sli4_mbox_cmd_free(phba, mbox);
16159 	if (shdr_status || shdr_add_status || rc) {
16160 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16161 				"2564 POST_SGL_BLOCK mailbox command failed "
16162 				"status x%x add_status x%x mbx status x%x\n",
16163 				shdr_status, shdr_add_status, rc);
16164 		rc = -ENXIO;
16165 	}
16166 	return rc;
16167 }
16168 
16169 /**
16170  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
16171  * @phba: pointer to lpfc_hba struct that the frame was received on
16172  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16173  *
16174  * This function checks the fields in the @fc_hdr to see if the FC frame is a
16175  * valid type of frame that the LPFC driver will handle. This function will
16176  * return a zero if the frame is a valid frame or a non zero value when the
16177  * frame does not pass the check.
16178  **/
16179 static int
16180 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
16181 {
16182 	/*  make rctl_names static to save stack space */
16183 	struct fc_vft_header *fc_vft_hdr;
16184 	uint32_t *header = (uint32_t *) fc_hdr;
16185 
16186 #define FC_RCTL_MDS_DIAGS	0xF4
16187 
16188 	switch (fc_hdr->fh_r_ctl) {
16189 	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
16190 	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
16191 	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
16192 	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
16193 	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
16194 	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
16195 	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
16196 	case FC_RCTL_DD_CMD_STATUS:	/* command status */
16197 	case FC_RCTL_ELS_REQ:	/* extended link services request */
16198 	case FC_RCTL_ELS_REP:	/* extended link services reply */
16199 	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
16200 	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
16201 	case FC_RCTL_BA_NOP:  	/* basic link service NOP */
16202 	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
16203 	case FC_RCTL_BA_RMC: 	/* remove connection */
16204 	case FC_RCTL_BA_ACC:	/* basic accept */
16205 	case FC_RCTL_BA_RJT:	/* basic reject */
16206 	case FC_RCTL_BA_PRMT:
16207 	case FC_RCTL_ACK_1:	/* acknowledge_1 */
16208 	case FC_RCTL_ACK_0:	/* acknowledge_0 */
16209 	case FC_RCTL_P_RJT:	/* port reject */
16210 	case FC_RCTL_F_RJT:	/* fabric reject */
16211 	case FC_RCTL_P_BSY:	/* port busy */
16212 	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
16213 	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
16214 	case FC_RCTL_LCR:	/* link credit reset */
16215 	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
16216 	case FC_RCTL_END:	/* end */
16217 		break;
16218 	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
16219 		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16220 		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
16221 		return lpfc_fc_frame_check(phba, fc_hdr);
16222 	default:
16223 		goto drop;
16224 	}
16225 
16226 #define FC_TYPE_VENDOR_UNIQUE	0xFF
16227 
16228 	switch (fc_hdr->fh_type) {
16229 	case FC_TYPE_BLS:
16230 	case FC_TYPE_ELS:
16231 	case FC_TYPE_FCP:
16232 	case FC_TYPE_CT:
16233 	case FC_TYPE_NVME:
16234 	case FC_TYPE_VENDOR_UNIQUE:
16235 		break;
16236 	case FC_TYPE_IP:
16237 	case FC_TYPE_ILS:
16238 	default:
16239 		goto drop;
16240 	}
16241 
16242 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
16243 			"2538 Received frame rctl:x%x, type:x%x, "
16244 			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
16245 			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
16246 			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
16247 			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
16248 			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
16249 			be32_to_cpu(header[6]));
16250 	return 0;
16251 drop:
16252 	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
16253 			"2539 Dropped frame rctl:x%x type:x%x\n",
16254 			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
16255 	return 1;
16256 }
16257 
16258 /**
16259  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
16260  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16261  *
16262  * This function processes the FC header to retrieve the VFI from the VF
16263  * header, if one exists. This function will return the VFI if one exists
16264  * or 0 if no VSAN Header exists.
16265  **/
16266 static uint32_t
16267 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
16268 {
16269 	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
16270 
16271 	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
16272 		return 0;
16273 	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
16274 }
16275 
16276 /**
16277  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
16278  * @phba: Pointer to the HBA structure to search for the vport on
16279  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
16280  * @fcfi: The FC Fabric ID that the frame came from
16281  *
16282  * This function searches the @phba for a vport that matches the content of the
16283  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
16284  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
16285  * returns the matching vport pointer or NULL if unable to match frame to a
16286  * vport.
16287  **/
16288 static struct lpfc_vport *
16289 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
16290 		       uint16_t fcfi, uint32_t did)
16291 {
16292 	struct lpfc_vport **vports;
16293 	struct lpfc_vport *vport = NULL;
16294 	int i;
16295 
16296 	if (did == Fabric_DID)
16297 		return phba->pport;
16298 	if ((phba->pport->fc_flag & FC_PT2PT) &&
16299 		!(phba->link_state == LPFC_HBA_READY))
16300 		return phba->pport;
16301 
16302 	vports = lpfc_create_vport_work_array(phba);
16303 	if (vports != NULL) {
16304 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
16305 			if (phba->fcf.fcfi == fcfi &&
16306 			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
16307 			    vports[i]->fc_myDID == did) {
16308 				vport = vports[i];
16309 				break;
16310 			}
16311 		}
16312 	}
16313 	lpfc_destroy_vport_work_array(phba, vports);
16314 	return vport;
16315 }
16316 
16317 /**
16318  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
16319  * @vport: The vport to work on.
16320  *
16321  * This function updates the receive sequence time stamp for this vport. The
16322  * receive sequence time stamp indicates the time that the last frame of the
16323  * the sequence that has been idle for the longest amount of time was received.
16324  * the driver uses this time stamp to indicate if any received sequences have
16325  * timed out.
16326  **/
16327 static void
16328 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
16329 {
16330 	struct lpfc_dmabuf *h_buf;
16331 	struct hbq_dmabuf *dmabuf = NULL;
16332 
16333 	/* get the oldest sequence on the rcv list */
16334 	h_buf = list_get_first(&vport->rcv_buffer_list,
16335 			       struct lpfc_dmabuf, list);
16336 	if (!h_buf)
16337 		return;
16338 	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16339 	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
16340 }
16341 
16342 /**
16343  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
16344  * @vport: The vport that the received sequences were sent to.
16345  *
16346  * This function cleans up all outstanding received sequences. This is called
16347  * by the driver when a link event or user action invalidates all the received
16348  * sequences.
16349  **/
16350 void
16351 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
16352 {
16353 	struct lpfc_dmabuf *h_buf, *hnext;
16354 	struct lpfc_dmabuf *d_buf, *dnext;
16355 	struct hbq_dmabuf *dmabuf = NULL;
16356 
16357 	/* start with the oldest sequence on the rcv list */
16358 	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16359 		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16360 		list_del_init(&dmabuf->hbuf.list);
16361 		list_for_each_entry_safe(d_buf, dnext,
16362 					 &dmabuf->dbuf.list, list) {
16363 			list_del_init(&d_buf->list);
16364 			lpfc_in_buf_free(vport->phba, d_buf);
16365 		}
16366 		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16367 	}
16368 }
16369 
16370 /**
16371  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
16372  * @vport: The vport that the received sequences were sent to.
16373  *
16374  * This function determines whether any received sequences have timed out by
16375  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
16376  * indicates that there is at least one timed out sequence this routine will
16377  * go through the received sequences one at a time from most inactive to most
16378  * active to determine which ones need to be cleaned up. Once it has determined
16379  * that a sequence needs to be cleaned up it will simply free up the resources
16380  * without sending an abort.
16381  **/
16382 void
16383 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
16384 {
16385 	struct lpfc_dmabuf *h_buf, *hnext;
16386 	struct lpfc_dmabuf *d_buf, *dnext;
16387 	struct hbq_dmabuf *dmabuf = NULL;
16388 	unsigned long timeout;
16389 	int abort_count = 0;
16390 
16391 	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16392 		   vport->rcv_buffer_time_stamp);
16393 	if (list_empty(&vport->rcv_buffer_list) ||
16394 	    time_before(jiffies, timeout))
16395 		return;
16396 	/* start with the oldest sequence on the rcv list */
16397 	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
16398 		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16399 		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
16400 			   dmabuf->time_stamp);
16401 		if (time_before(jiffies, timeout))
16402 			break;
16403 		abort_count++;
16404 		list_del_init(&dmabuf->hbuf.list);
16405 		list_for_each_entry_safe(d_buf, dnext,
16406 					 &dmabuf->dbuf.list, list) {
16407 			list_del_init(&d_buf->list);
16408 			lpfc_in_buf_free(vport->phba, d_buf);
16409 		}
16410 		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
16411 	}
16412 	if (abort_count)
16413 		lpfc_update_rcv_time_stamp(vport);
16414 }
16415 
16416 /**
16417  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
16418  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
16419  *
16420  * This function searches through the existing incomplete sequences that have
16421  * been sent to this @vport. If the frame matches one of the incomplete
16422  * sequences then the dbuf in the @dmabuf is added to the list of frames that
16423  * make up that sequence. If no sequence is found that matches this frame then
16424  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
16425  * This function returns a pointer to the first dmabuf in the sequence list that
16426  * the frame was linked to.
16427  **/
16428 static struct hbq_dmabuf *
16429 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16430 {
16431 	struct fc_frame_header *new_hdr;
16432 	struct fc_frame_header *temp_hdr;
16433 	struct lpfc_dmabuf *d_buf;
16434 	struct lpfc_dmabuf *h_buf;
16435 	struct hbq_dmabuf *seq_dmabuf = NULL;
16436 	struct hbq_dmabuf *temp_dmabuf = NULL;
16437 	uint8_t	found = 0;
16438 
16439 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
16440 	dmabuf->time_stamp = jiffies;
16441 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16442 
16443 	/* Use the hdr_buf to find the sequence that this frame belongs to */
16444 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16445 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
16446 		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16447 		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16448 		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16449 			continue;
16450 		/* found a pending sequence that matches this frame */
16451 		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16452 		break;
16453 	}
16454 	if (!seq_dmabuf) {
16455 		/*
16456 		 * This indicates first frame received for this sequence.
16457 		 * Queue the buffer on the vport's rcv_buffer_list.
16458 		 */
16459 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16460 		lpfc_update_rcv_time_stamp(vport);
16461 		return dmabuf;
16462 	}
16463 	temp_hdr = seq_dmabuf->hbuf.virt;
16464 	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
16465 		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
16466 		list_del_init(&seq_dmabuf->hbuf.list);
16467 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
16468 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16469 		lpfc_update_rcv_time_stamp(vport);
16470 		return dmabuf;
16471 	}
16472 	/* move this sequence to the tail to indicate a young sequence */
16473 	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
16474 	seq_dmabuf->time_stamp = jiffies;
16475 	lpfc_update_rcv_time_stamp(vport);
16476 	if (list_empty(&seq_dmabuf->dbuf.list)) {
16477 		temp_hdr = dmabuf->hbuf.virt;
16478 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
16479 		return seq_dmabuf;
16480 	}
16481 	/* find the correct place in the sequence to insert this frame */
16482 	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
16483 	while (!found) {
16484 		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16485 		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
16486 		/*
16487 		 * If the frame's sequence count is greater than the frame on
16488 		 * the list then insert the frame right after this frame
16489 		 */
16490 		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
16491 			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
16492 			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
16493 			found = 1;
16494 			break;
16495 		}
16496 
16497 		if (&d_buf->list == &seq_dmabuf->dbuf.list)
16498 			break;
16499 		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
16500 	}
16501 
16502 	if (found)
16503 		return seq_dmabuf;
16504 	return NULL;
16505 }
16506 
16507 /**
16508  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
16509  * @vport: pointer to a vitural port
16510  * @dmabuf: pointer to a dmabuf that describes the FC sequence
16511  *
16512  * This function tries to abort from the partially assembed sequence, described
16513  * by the information from basic abbort @dmabuf. It checks to see whether such
16514  * partially assembled sequence held by the driver. If so, it shall free up all
16515  * the frames from the partially assembled sequence.
16516  *
16517  * Return
16518  * true  -- if there is matching partially assembled sequence present and all
16519  *          the frames freed with the sequence;
16520  * false -- if there is no matching partially assembled sequence present so
16521  *          nothing got aborted in the lower layer driver
16522  **/
16523 static bool
16524 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
16525 			    struct hbq_dmabuf *dmabuf)
16526 {
16527 	struct fc_frame_header *new_hdr;
16528 	struct fc_frame_header *temp_hdr;
16529 	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
16530 	struct hbq_dmabuf *seq_dmabuf = NULL;
16531 
16532 	/* Use the hdr_buf to find the sequence that matches this frame */
16533 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
16534 	INIT_LIST_HEAD(&dmabuf->hbuf.list);
16535 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16536 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
16537 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
16538 		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
16539 		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
16540 		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
16541 			continue;
16542 		/* found a pending sequence that matches this frame */
16543 		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
16544 		break;
16545 	}
16546 
16547 	/* Free up all the frames from the partially assembled sequence */
16548 	if (seq_dmabuf) {
16549 		list_for_each_entry_safe(d_buf, n_buf,
16550 					 &seq_dmabuf->dbuf.list, list) {
16551 			list_del_init(&d_buf->list);
16552 			lpfc_in_buf_free(vport->phba, d_buf);
16553 		}
16554 		return true;
16555 	}
16556 	return false;
16557 }
16558 
16559 /**
16560  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
16561  * @vport: pointer to a vitural port
16562  * @dmabuf: pointer to a dmabuf that describes the FC sequence
16563  *
16564  * This function tries to abort from the assembed sequence from upper level
16565  * protocol, described by the information from basic abbort @dmabuf. It
16566  * checks to see whether such pending context exists at upper level protocol.
16567  * If so, it shall clean up the pending context.
16568  *
16569  * Return
16570  * true  -- if there is matching pending context of the sequence cleaned
16571  *          at ulp;
16572  * false -- if there is no matching pending context of the sequence present
16573  *          at ulp.
16574  **/
16575 static bool
16576 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
16577 {
16578 	struct lpfc_hba *phba = vport->phba;
16579 	int handled;
16580 
16581 	/* Accepting abort at ulp with SLI4 only */
16582 	if (phba->sli_rev < LPFC_SLI_REV4)
16583 		return false;
16584 
16585 	/* Register all caring upper level protocols to attend abort */
16586 	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
16587 	if (handled)
16588 		return true;
16589 
16590 	return false;
16591 }
16592 
16593 /**
16594  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
16595  * @phba: Pointer to HBA context object.
16596  * @cmd_iocbq: pointer to the command iocbq structure.
16597  * @rsp_iocbq: pointer to the response iocbq structure.
16598  *
16599  * This function handles the sequence abort response iocb command complete
16600  * event. It properly releases the memory allocated to the sequence abort
16601  * accept iocb.
16602  **/
16603 static void
16604 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
16605 			     struct lpfc_iocbq *cmd_iocbq,
16606 			     struct lpfc_iocbq *rsp_iocbq)
16607 {
16608 	struct lpfc_nodelist *ndlp;
16609 
16610 	if (cmd_iocbq) {
16611 		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
16612 		lpfc_nlp_put(ndlp);
16613 		lpfc_nlp_not_used(ndlp);
16614 		lpfc_sli_release_iocbq(phba, cmd_iocbq);
16615 	}
16616 
16617 	/* Failure means BLS ABORT RSP did not get delivered to remote node*/
16618 	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
16619 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
16620 			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
16621 			rsp_iocbq->iocb.ulpStatus,
16622 			rsp_iocbq->iocb.un.ulpWord[4]);
16623 }
16624 
16625 /**
16626  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
16627  * @phba: Pointer to HBA context object.
16628  * @xri: xri id in transaction.
16629  *
16630  * This function validates the xri maps to the known range of XRIs allocated an
16631  * used by the driver.
16632  **/
16633 uint16_t
16634 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
16635 		      uint16_t xri)
16636 {
16637 	uint16_t i;
16638 
16639 	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
16640 		if (xri == phba->sli4_hba.xri_ids[i])
16641 			return i;
16642 	}
16643 	return NO_XRI;
16644 }
16645 
16646 /**
16647  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
16648  * @phba: Pointer to HBA context object.
16649  * @fc_hdr: pointer to a FC frame header.
16650  *
16651  * This function sends a basic response to a previous unsol sequence abort
16652  * event after aborting the sequence handling.
16653  **/
16654 void
16655 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
16656 			struct fc_frame_header *fc_hdr, bool aborted)
16657 {
16658 	struct lpfc_hba *phba = vport->phba;
16659 	struct lpfc_iocbq *ctiocb = NULL;
16660 	struct lpfc_nodelist *ndlp;
16661 	uint16_t oxid, rxid, xri, lxri;
16662 	uint32_t sid, fctl;
16663 	IOCB_t *icmd;
16664 	int rc;
16665 
16666 	if (!lpfc_is_link_up(phba))
16667 		return;
16668 
16669 	sid = sli4_sid_from_fc_hdr(fc_hdr);
16670 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
16671 	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
16672 
16673 	ndlp = lpfc_findnode_did(vport, sid);
16674 	if (!ndlp) {
16675 		ndlp = lpfc_nlp_init(vport, sid);
16676 		if (!ndlp) {
16677 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
16678 					 "1268 Failed to allocate ndlp for "
16679 					 "oxid:x%x SID:x%x\n", oxid, sid);
16680 			return;
16681 		}
16682 		/* Put ndlp onto pport node list */
16683 		lpfc_enqueue_node(vport, ndlp);
16684 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
16685 		/* re-setup ndlp without removing from node list */
16686 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
16687 		if (!ndlp) {
16688 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
16689 					 "3275 Failed to active ndlp found "
16690 					 "for oxid:x%x SID:x%x\n", oxid, sid);
16691 			return;
16692 		}
16693 	}
16694 
16695 	/* Allocate buffer for rsp iocb */
16696 	ctiocb = lpfc_sli_get_iocbq(phba);
16697 	if (!ctiocb)
16698 		return;
16699 
16700 	/* Extract the F_CTL field from FC_HDR */
16701 	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
16702 
16703 	icmd = &ctiocb->iocb;
16704 	icmd->un.xseq64.bdl.bdeSize = 0;
16705 	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
16706 	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
16707 	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
16708 	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;
16709 
16710 	/* Fill in the rest of iocb fields */
16711 	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
16712 	icmd->ulpBdeCount = 0;
16713 	icmd->ulpLe = 1;
16714 	icmd->ulpClass = CLASS3;
16715 	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
16716 	ctiocb->context1 = lpfc_nlp_get(ndlp);
16717 
16718 	ctiocb->iocb_cmpl = NULL;
16719 	ctiocb->vport = phba->pport;
16720 	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
16721 	ctiocb->sli4_lxritag = NO_XRI;
16722 	ctiocb->sli4_xritag = NO_XRI;
16723 
16724 	if (fctl & FC_FC_EX_CTX)
16725 		/* Exchange responder sent the abort so we
16726 		 * own the oxid.
16727 		 */
16728 		xri = oxid;
16729 	else
16730 		xri = rxid;
16731 	lxri = lpfc_sli4_xri_inrange(phba, xri);
16732 	if (lxri != NO_XRI)
16733 		lpfc_set_rrq_active(phba, ndlp, lxri,
16734 			(xri == oxid) ? rxid : oxid, 0);
16735 	/* For BA_ABTS from exchange responder, if the logical xri with
16736 	 * the oxid maps to the FCP XRI range, the port no longer has
16737 	 * that exchange context, send a BLS_RJT. Override the IOCB for
16738 	 * a BA_RJT.
16739 	 */
16740 	if ((fctl & FC_FC_EX_CTX) &&
16741 	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
16742 		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
16743 		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
16744 		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
16745 		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
16746 	}
16747 
16748 	/* If BA_ABTS failed to abort a partially assembled receive sequence,
16749 	 * the driver no longer has that exchange, send a BLS_RJT. Override
16750 	 * the IOCB for a BA_RJT.
16751 	 */
16752 	if (aborted == false) {
16753 		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
16754 		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
16755 		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
16756 		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
16757 	}
16758 
16759 	if (fctl & FC_FC_EX_CTX) {
16760 		/* ABTS sent by responder to CT exchange, construction
16761 		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
16762 		 * field and RX_ID from ABTS for RX_ID field.
16763 		 */
16764 		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
16765 	} else {
16766 		/* ABTS sent by initiator to CT exchange, construction
16767 		 * of BA_ACC will need to allocate a new XRI as for the
16768 		 * XRI_TAG field.
16769 		 */
16770 		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
16771 	}
16772 	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
16773 	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
16774 
16775 	/* Xmit CT abts response on exchange <xid> */
16776 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
16777 			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
16778 			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
16779 
16780 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
16781 	if (rc == IOCB_ERROR) {
16782 		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
16783 				 "2925 Failed to issue CT ABTS RSP x%x on "
16784 				 "xri x%x, Data x%x\n",
16785 				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
16786 				 phba->link_state);
16787 		lpfc_nlp_put(ndlp);
16788 		ctiocb->context1 = NULL;
16789 		lpfc_sli_release_iocbq(phba, ctiocb);
16790 	}
16791 }
16792 
16793 /**
16794  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
16795  * @vport: Pointer to the vport on which this sequence was received
16796  * @dmabuf: pointer to a dmabuf that describes the FC sequence
16797  *
16798  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
16799  * receive sequence is only partially assembed by the driver, it shall abort
16800  * the partially assembled frames for the sequence. Otherwise, if the
16801  * unsolicited receive sequence has been completely assembled and passed to
16802  * the Upper Layer Protocol (UPL), it then mark the per oxid status for the
16803  * unsolicited sequence has been aborted. After that, it will issue a basic
16804  * accept to accept the abort.
16805  **/
16806 static void
16807 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
16808 			     struct hbq_dmabuf *dmabuf)
16809 {
16810 	struct lpfc_hba *phba = vport->phba;
16811 	struct fc_frame_header fc_hdr;
16812 	uint32_t fctl;
16813 	bool aborted;
16814 
16815 	/* Make a copy of fc_hdr before the dmabuf being released */
16816 	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
16817 	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
16818 
16819 	if (fctl & FC_FC_EX_CTX) {
16820 		/* ABTS by responder to exchange, no cleanup needed */
16821 		aborted = true;
16822 	} else {
16823 		/* ABTS by initiator to exchange, need to do cleanup */
16824 		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
16825 		if (aborted == false)
16826 			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
16827 	}
16828 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
16829 
16830 	if (phba->nvmet_support) {
16831 		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
16832 		return;
16833 	}
16834 
16835 	/* Respond with BA_ACC or BA_RJT accordingly */
16836 	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
16837 }
16838 
16839 /**
16840  * lpfc_seq_complete - Indicates if a sequence is complete
16841  * @dmabuf: pointer to a dmabuf that describes the FC sequence
16842  *
16843  * This function checks the sequence, starting with the frame described by
16844  * @dmabuf, to see if all the frames associated with this sequence are present.
16845  * the frames associated with this sequence are linked to the @dmabuf using the
16846  * dbuf list. This function looks for two major things. 1) That the first frame
16847  * has a sequence count of zero. 2) There is a frame with last frame of sequence
16848  * set. 3) That there are no holes in the sequence count. The function will
16849  * return 1 when the sequence is complete, otherwise it will return 0.
16850  **/
16851 static int
16852 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
16853 {
16854 	struct fc_frame_header *hdr;
16855 	struct lpfc_dmabuf *d_buf;
16856 	struct hbq_dmabuf *seq_dmabuf;
16857 	uint32_t fctl;
16858 	int seq_count = 0;
16859 
16860 	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
16861 	/* make sure first fame of sequence has a sequence count of zero */
16862 	if (hdr->fh_seq_cnt != seq_count)
16863 		return 0;
16864 	fctl = (hdr->fh_f_ctl[0] << 16 |
16865 		hdr->fh_f_ctl[1] << 8 |
16866 		hdr->fh_f_ctl[2]);
16867 	/* If last frame of sequence we can return success. */
16868 	if (fctl & FC_FC_END_SEQ)
16869 		return 1;
16870 	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
16871 		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16872 		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16873 		/* If there is a hole in the sequence count then fail. */
16874 		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
16875 			return 0;
16876 		fctl = (hdr->fh_f_ctl[0] << 16 |
16877 			hdr->fh_f_ctl[1] << 8 |
16878 			hdr->fh_f_ctl[2]);
16879 		/* If last frame of sequence we can return success. */
16880 		if (fctl & FC_FC_END_SEQ)
16881 			return 1;
16882 	}
16883 	return 0;
16884 }
16885 
16886 /**
16887  * lpfc_prep_seq - Prep sequence for ULP processing
16888  * @vport: Pointer to the vport on which this sequence was received
16889  * @dmabuf: pointer to a dmabuf that describes the FC sequence
16890  *
16891  * This function takes a sequence, described by a list of frames, and creates
16892  * a list of iocbq structures to describe the sequence. This iocbq list will be
16893  * used to issue to the generic unsolicited sequence handler. This routine
16894  * returns a pointer to the first iocbq in the list. If the function is unable
16895  * to allocate an iocbq then it throw out the received frames that were not
16896  * able to be described and return a pointer to the first iocbq. If unable to
16897  * allocate any iocbqs (including the first) this function will return NULL.
16898  **/
16899 static struct lpfc_iocbq *
16900 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
16901 {
16902 	struct hbq_dmabuf *hbq_buf;
16903 	struct lpfc_dmabuf *d_buf, *n_buf;
16904 	struct lpfc_iocbq *first_iocbq, *iocbq;
16905 	struct fc_frame_header *fc_hdr;
16906 	uint32_t sid;
16907 	uint32_t len, tot_len;
16908 	struct ulp_bde64 *pbde;
16909 
16910 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
16911 	/* remove from receive buffer list */
16912 	list_del_init(&seq_dmabuf->hbuf.list);
16913 	lpfc_update_rcv_time_stamp(vport);
16914 	/* get the Remote Port's SID */
16915 	sid = sli4_sid_from_fc_hdr(fc_hdr);
16916 	tot_len = 0;
16917 	/* Get an iocbq struct to fill in. */
16918 	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
16919 	if (first_iocbq) {
16920 		/* Initialize the first IOCB. */
16921 		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
16922 		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
16923 		first_iocbq->vport = vport;
16924 
16925 		/* Check FC Header to see what TYPE of frame we are rcv'ing */
16926 		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
16927 			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
16928 			first_iocbq->iocb.un.rcvels.parmRo =
16929 				sli4_did_from_fc_hdr(fc_hdr);
16930 			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
16931 		} else
16932 			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
16933 		first_iocbq->iocb.ulpContext = NO_XRI;
16934 		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
16935 			be16_to_cpu(fc_hdr->fh_ox_id);
16936 		/* iocbq is prepped for internal consumption.  Physical vpi. */
16937 		first_iocbq->iocb.unsli3.rcvsli3.vpi =
16938 			vport->phba->vpi_ids[vport->vpi];
16939 		/* put the first buffer into the first IOCBq */
16940 		tot_len = bf_get(lpfc_rcqe_length,
16941 				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
16942 
16943 		first_iocbq->context2 = &seq_dmabuf->dbuf;
16944 		first_iocbq->context3 = NULL;
16945 		first_iocbq->iocb.ulpBdeCount = 1;
16946 		if (tot_len > LPFC_DATA_BUF_SIZE)
16947 			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
16948 							LPFC_DATA_BUF_SIZE;
16949 		else
16950 			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
16951 
16952 		first_iocbq->iocb.un.rcvels.remoteID = sid;
16953 
16954 		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
16955 	}
16956 	iocbq = first_iocbq;
16957 	/*
16958 	 * Each IOCBq can have two Buffers assigned, so go through the list
16959 	 * of buffers for this sequence and save two buffers in each IOCBq
16960 	 */
16961 	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
16962 		if (!iocbq) {
16963 			lpfc_in_buf_free(vport->phba, d_buf);
16964 			continue;
16965 		}
16966 		if (!iocbq->context3) {
16967 			iocbq->context3 = d_buf;
16968 			iocbq->iocb.ulpBdeCount++;
16969 			/* We need to get the size out of the right CQE */
16970 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16971 			len = bf_get(lpfc_rcqe_length,
16972 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
16973 			pbde = (struct ulp_bde64 *)
16974 					&iocbq->iocb.unsli3.sli3Words[4];
16975 			if (len > LPFC_DATA_BUF_SIZE)
16976 				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
16977 			else
16978 				pbde->tus.f.bdeSize = len;
16979 
16980 			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
16981 			tot_len += len;
16982 		} else {
16983 			iocbq = lpfc_sli_get_iocbq(vport->phba);
16984 			if (!iocbq) {
16985 				if (first_iocbq) {
16986 					first_iocbq->iocb.ulpStatus =
16987 							IOSTAT_FCP_RSP_ERROR;
16988 					first_iocbq->iocb.un.ulpWord[4] =
16989 							IOERR_NO_RESOURCES;
16990 				}
16991 				lpfc_in_buf_free(vport->phba, d_buf);
16992 				continue;
16993 			}
16994 			/* We need to get the size out of the right CQE */
16995 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
16996 			len = bf_get(lpfc_rcqe_length,
16997 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
16998 			iocbq->context2 = d_buf;
16999 			iocbq->context3 = NULL;
17000 			iocbq->iocb.ulpBdeCount = 1;
17001 			if (len > LPFC_DATA_BUF_SIZE)
17002 				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
17003 							LPFC_DATA_BUF_SIZE;
17004 			else
17005 				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
17006 
17007 			tot_len += len;
17008 			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
17009 
17010 			iocbq->iocb.un.rcvels.remoteID = sid;
17011 			list_add_tail(&iocbq->list, &first_iocbq->list);
17012 		}
17013 	}
17014 	return first_iocbq;
17015 }
17016 
17017 static void
17018 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
17019 			  struct hbq_dmabuf *seq_dmabuf)
17020 {
17021 	struct fc_frame_header *fc_hdr;
17022 	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
17023 	struct lpfc_hba *phba = vport->phba;
17024 
17025 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
17026 	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
17027 	if (!iocbq) {
17028 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17029 				"2707 Ring %d handler: Failed to allocate "
17030 				"iocb Rctl x%x Type x%x received\n",
17031 				LPFC_ELS_RING,
17032 				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17033 		return;
17034 	}
17035 	if (!lpfc_complete_unsol_iocb(phba,
17036 				      phba->sli4_hba.els_wq->pring,
17037 				      iocbq, fc_hdr->fh_r_ctl,
17038 				      fc_hdr->fh_type))
17039 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17040 				"2540 Ring %d handler: unexpected Rctl "
17041 				"x%x Type x%x received\n",
17042 				LPFC_ELS_RING,
17043 				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
17044 
17045 	/* Free iocb created in lpfc_prep_seq */
17046 	list_for_each_entry_safe(curr_iocb, next_iocb,
17047 		&iocbq->list, list) {
17048 		list_del_init(&curr_iocb->list);
17049 		lpfc_sli_release_iocbq(phba, curr_iocb);
17050 	}
17051 	lpfc_sli_release_iocbq(phba, iocbq);
17052 }
17053 
17054 static void
17055 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
17056 			    struct lpfc_iocbq *rspiocb)
17057 {
17058 	struct lpfc_dmabuf *pcmd = cmdiocb->context2;
17059 
17060 	if (pcmd && pcmd->virt)
17061 		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17062 	kfree(pcmd);
17063 	lpfc_sli_release_iocbq(phba, cmdiocb);
17064 }
17065 
17066 static void
17067 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
17068 			      struct hbq_dmabuf *dmabuf)
17069 {
17070 	struct fc_frame_header *fc_hdr;
17071 	struct lpfc_hba *phba = vport->phba;
17072 	struct lpfc_iocbq *iocbq = NULL;
17073 	union  lpfc_wqe *wqe;
17074 	struct lpfc_dmabuf *pcmd = NULL;
17075 	uint32_t frame_len;
17076 	int rc;
17077 
17078 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17079 	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
17080 
17081 	/* Send the received frame back */
17082 	iocbq = lpfc_sli_get_iocbq(phba);
17083 	if (!iocbq)
17084 		goto exit;
17085 
17086 	/* Allocate buffer for command payload */
17087 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
17088 	if (pcmd)
17089 		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
17090 					    &pcmd->phys);
17091 	if (!pcmd || !pcmd->virt)
17092 		goto exit;
17093 
17094 	INIT_LIST_HEAD(&pcmd->list);
17095 
17096 	/* copyin the payload */
17097 	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
17098 
17099 	/* fill in BDE's for command */
17100 	iocbq->iocb.un.xseq64.bdl.addrHigh = putPaddrHigh(pcmd->phys);
17101 	iocbq->iocb.un.xseq64.bdl.addrLow = putPaddrLow(pcmd->phys);
17102 	iocbq->iocb.un.xseq64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
17103 	iocbq->iocb.un.xseq64.bdl.bdeSize = frame_len;
17104 
17105 	iocbq->context2 = pcmd;
17106 	iocbq->vport = vport;
17107 	iocbq->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
17108 	iocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
17109 
17110 	/*
17111 	 * Setup rest of the iocb as though it were a WQE
17112 	 * Build the SEND_FRAME WQE
17113 	 */
17114 	wqe = (union lpfc_wqe *)&iocbq->iocb;
17115 
17116 	wqe->send_frame.frame_len = frame_len;
17117 	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((uint32_t *)fc_hdr));
17118 	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((uint32_t *)fc_hdr + 1));
17119 	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((uint32_t *)fc_hdr + 2));
17120 	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((uint32_t *)fc_hdr + 3));
17121 	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((uint32_t *)fc_hdr + 4));
17122 	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((uint32_t *)fc_hdr + 5));
17123 
17124 	iocbq->iocb.ulpCommand = CMD_SEND_FRAME;
17125 	iocbq->iocb.ulpLe = 1;
17126 	iocbq->iocb_cmpl = lpfc_sli4_mds_loopback_cmpl;
17127 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
17128 	if (rc == IOCB_ERROR)
17129 		goto exit;
17130 
17131 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
17132 	return;
17133 
17134 exit:
17135 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
17136 			"2023 Unable to process MDS loopback frame\n");
17137 	if (pcmd && pcmd->virt)
17138 		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
17139 	kfree(pcmd);
17140 	lpfc_sli_release_iocbq(phba, iocbq);
17141 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
17142 }
17143 
17144 /**
17145  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
17146  * @phba: Pointer to HBA context object.
17147  *
17148  * This function is called with no lock held. This function processes all
17149  * the received buffers and gives it to upper layers when a received buffer
17150  * indicates that it is the final frame in the sequence. The interrupt
17151  * service routine processes received buffers at interrupt contexts.
17152  * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
17153  * appropriate receive function when the final frame in a sequence is received.
17154  **/
17155 void
17156 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
17157 				 struct hbq_dmabuf *dmabuf)
17158 {
17159 	struct hbq_dmabuf *seq_dmabuf;
17160 	struct fc_frame_header *fc_hdr;
17161 	struct lpfc_vport *vport;
17162 	uint32_t fcfi;
17163 	uint32_t did;
17164 
17165 	/* Process each received buffer */
17166 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
17167 
17168 	/* check to see if this a valid type of frame */
17169 	if (lpfc_fc_frame_check(phba, fc_hdr)) {
17170 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
17171 		return;
17172 	}
17173 
17174 	if ((bf_get(lpfc_cqe_code,
17175 		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
17176 		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
17177 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
17178 	else
17179 		fcfi = bf_get(lpfc_rcqe_fcf_id,
17180 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
17181 
17182 	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
17183 		vport = phba->pport;
17184 		/* Handle MDS Loopback frames */
17185 		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
17186 		return;
17187 	}
17188 
17189 	/* d_id this frame is directed to */
17190 	did = sli4_did_from_fc_hdr(fc_hdr);
17191 
17192 	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
17193 	if (!vport) {
17194 		/* throw out the frame */
17195 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
17196 		return;
17197 	}
17198 
17199 	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
17200 	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
17201 		(did != Fabric_DID)) {
17202 		/*
17203 		 * Throw out the frame if we are not pt2pt.
17204 		 * The pt2pt protocol allows for discovery frames
17205 		 * to be received without a registered VPI.
17206 		 */
17207 		if (!(vport->fc_flag & FC_PT2PT) ||
17208 			(phba->link_state == LPFC_HBA_READY)) {
17209 			lpfc_in_buf_free(phba, &dmabuf->dbuf);
17210 			return;
17211 		}
17212 	}
17213 
17214 	/* Handle the basic abort sequence (BA_ABTS) event */
17215 	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
17216 		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
17217 		return;
17218 	}
17219 
17220 	/* Link this frame */
17221 	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
17222 	if (!seq_dmabuf) {
17223 		/* unable to add frame to vport - throw it out */
17224 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
17225 		return;
17226 	}
17227 	/* If not last frame in sequence continue processing frames. */
17228 	if (!lpfc_seq_complete(seq_dmabuf))
17229 		return;
17230 
17231 	/* Send the complete sequence to the upper layer protocol */
17232 	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
17233 }
17234 
17235 /**
17236  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
17237  * @phba: pointer to lpfc hba data structure.
17238  *
17239  * This routine is invoked to post rpi header templates to the
17240  * HBA consistent with the SLI-4 interface spec.  This routine
17241  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17242  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
17243  *
17244  * This routine does not require any locks.  It's usage is expected
17245  * to be driver load or reset recovery when the driver is
17246  * sequential.
17247  *
17248  * Return codes
17249  * 	0 - successful
17250  *      -EIO - The mailbox failed to complete successfully.
17251  * 	When this error occurs, the driver is not guaranteed
17252  *	to have any rpi regions posted to the device and
17253  *	must either attempt to repost the regions or take a
17254  *	fatal error.
17255  **/
17256 int
17257 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
17258 {
17259 	struct lpfc_rpi_hdr *rpi_page;
17260 	uint32_t rc = 0;
17261 	uint16_t lrpi = 0;
17262 
17263 	/* SLI4 ports that support extents do not require RPI headers. */
17264 	if (!phba->sli4_hba.rpi_hdrs_in_use)
17265 		goto exit;
17266 	if (phba->sli4_hba.extents_in_use)
17267 		return -EIO;
17268 
17269 	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
17270 		/*
17271 		 * Assign the rpi headers a physical rpi only if the driver
17272 		 * has not initialized those resources.  A port reset only
17273 		 * needs the headers posted.
17274 		 */
17275 		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
17276 		    LPFC_RPI_RSRC_RDY)
17277 			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17278 
17279 		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
17280 		if (rc != MBX_SUCCESS) {
17281 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17282 					"2008 Error %d posting all rpi "
17283 					"headers\n", rc);
17284 			rc = -EIO;
17285 			break;
17286 		}
17287 	}
17288 
17289  exit:
17290 	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
17291 	       LPFC_RPI_RSRC_RDY);
17292 	return rc;
17293 }
17294 
17295 /**
17296  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
17297  * @phba: pointer to lpfc hba data structure.
17298  * @rpi_page:  pointer to the rpi memory region.
17299  *
17300  * This routine is invoked to post a single rpi header to the
17301  * HBA consistent with the SLI-4 interface spec.  This memory region
17302  * maps up to 64 rpi context regions.
17303  *
17304  * Return codes
17305  * 	0 - successful
17306  * 	-ENOMEM - No available memory
17307  *      -EIO - The mailbox failed to complete successfully.
17308  **/
17309 int
17310 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
17311 {
17312 	LPFC_MBOXQ_t *mboxq;
17313 	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
17314 	uint32_t rc = 0;
17315 	uint32_t shdr_status, shdr_add_status;
17316 	union lpfc_sli4_cfg_shdr *shdr;
17317 
17318 	/* SLI4 ports that support extents do not require RPI headers. */
17319 	if (!phba->sli4_hba.rpi_hdrs_in_use)
17320 		return rc;
17321 	if (phba->sli4_hba.extents_in_use)
17322 		return -EIO;
17323 
17324 	/* The port is notified of the header region via a mailbox command. */
17325 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17326 	if (!mboxq) {
17327 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17328 				"2001 Unable to allocate memory for issuing "
17329 				"SLI_CONFIG_SPECIAL mailbox command\n");
17330 		return -ENOMEM;
17331 	}
17332 
17333 	/* Post all rpi memory regions to the port. */
17334 	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
17335 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17336 			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
17337 			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
17338 			 sizeof(struct lpfc_sli4_cfg_mhdr),
17339 			 LPFC_SLI4_MBX_EMBED);
17340 
17341 
17342 	/* Post the physical rpi to the port for this rpi header. */
17343 	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
17344 	       rpi_page->start_rpi);
17345 	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
17346 	       hdr_tmpl, rpi_page->page_count);
17347 
17348 	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
17349 	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
17350 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
17351 	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
17352 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17353 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17354 	if (rc != MBX_TIMEOUT)
17355 		mempool_free(mboxq, phba->mbox_mem_pool);
17356 	if (shdr_status || shdr_add_status || rc) {
17357 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17358 				"2514 POST_RPI_HDR mailbox failed with "
17359 				"status x%x add_status x%x, mbx status x%x\n",
17360 				shdr_status, shdr_add_status, rc);
17361 		rc = -ENXIO;
17362 	} else {
17363 		/*
17364 		 * The next_rpi stores the next logical module-64 rpi value used
17365 		 * to post physical rpis in subsequent rpi postings.
17366 		 */
17367 		spin_lock_irq(&phba->hbalock);
17368 		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
17369 		spin_unlock_irq(&phba->hbalock);
17370 	}
17371 	return rc;
17372 }
17373 
17374 /**
17375  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
17376  * @phba: pointer to lpfc hba data structure.
17377  *
17378  * This routine is invoked to post rpi header templates to the
17379  * HBA consistent with the SLI-4 interface spec.  This routine
17380  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
17381  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
17382  *
17383  * Returns
17384  * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
17385  * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
17386  **/
17387 int
17388 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
17389 {
17390 	unsigned long rpi;
17391 	uint16_t max_rpi, rpi_limit;
17392 	uint16_t rpi_remaining, lrpi = 0;
17393 	struct lpfc_rpi_hdr *rpi_hdr;
17394 	unsigned long iflag;
17395 
17396 	/*
17397 	 * Fetch the next logical rpi.  Because this index is logical,
17398 	 * the  driver starts at 0 each time.
17399 	 */
17400 	spin_lock_irqsave(&phba->hbalock, iflag);
17401 	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
17402 	rpi_limit = phba->sli4_hba.next_rpi;
17403 
17404 	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
17405 	if (rpi >= rpi_limit)
17406 		rpi = LPFC_RPI_ALLOC_ERROR;
17407 	else {
17408 		set_bit(rpi, phba->sli4_hba.rpi_bmask);
17409 		phba->sli4_hba.max_cfg_param.rpi_used++;
17410 		phba->sli4_hba.rpi_count++;
17411 	}
17412 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
17413 			"0001 rpi:%x max:%x lim:%x\n",
17414 			(int) rpi, max_rpi, rpi_limit);
17415 
17416 	/*
17417 	 * Don't try to allocate more rpi header regions if the device limit
17418 	 * has been exhausted.
17419 	 */
17420 	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
17421 	    (phba->sli4_hba.rpi_count >= max_rpi)) {
17422 		spin_unlock_irqrestore(&phba->hbalock, iflag);
17423 		return rpi;
17424 	}
17425 
17426 	/*
17427 	 * RPI header postings are not required for SLI4 ports capable of
17428 	 * extents.
17429 	 */
17430 	if (!phba->sli4_hba.rpi_hdrs_in_use) {
17431 		spin_unlock_irqrestore(&phba->hbalock, iflag);
17432 		return rpi;
17433 	}
17434 
17435 	/*
17436 	 * If the driver is running low on rpi resources, allocate another
17437 	 * page now.  Note that the next_rpi value is used because
17438 	 * it represents how many are actually in use whereas max_rpi notes
17439 	 * how many are supported max by the device.
17440 	 */
17441 	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
17442 	spin_unlock_irqrestore(&phba->hbalock, iflag);
17443 	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
17444 		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
17445 		if (!rpi_hdr) {
17446 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17447 					"2002 Error Could not grow rpi "
17448 					"count\n");
17449 		} else {
17450 			lrpi = rpi_hdr->start_rpi;
17451 			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
17452 			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
17453 		}
17454 	}
17455 
17456 	return rpi;
17457 }
17458 
17459 /**
17460  * lpfc_sli4_free_rpi - Release an rpi for reuse.
17461  * @phba: pointer to lpfc hba data structure.
17462  *
17463  * This routine is invoked to release an rpi to the pool of
17464  * available rpis maintained by the driver.
17465  **/
17466 static void
17467 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17468 {
17469 	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
17470 		phba->sli4_hba.rpi_count--;
17471 		phba->sli4_hba.max_cfg_param.rpi_used--;
17472 	}
17473 }
17474 
17475 /**
17476  * lpfc_sli4_free_rpi - Release an rpi for reuse.
17477  * @phba: pointer to lpfc hba data structure.
17478  *
17479  * This routine is invoked to release an rpi to the pool of
17480  * available rpis maintained by the driver.
17481  **/
17482 void
17483 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
17484 {
17485 	spin_lock_irq(&phba->hbalock);
17486 	__lpfc_sli4_free_rpi(phba, rpi);
17487 	spin_unlock_irq(&phba->hbalock);
17488 }
17489 
17490 /**
17491  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
17492  * @phba: pointer to lpfc hba data structure.
17493  *
17494  * This routine is invoked to remove the memory region that
17495  * provided rpi via a bitmask.
17496  **/
17497 void
17498 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
17499 {
17500 	kfree(phba->sli4_hba.rpi_bmask);
17501 	kfree(phba->sli4_hba.rpi_ids);
17502 	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
17503 }
17504 
17505 /**
17506  * lpfc_sli4_resume_rpi - Remove the rpi bitmask region
17507  * @phba: pointer to lpfc hba data structure.
17508  *
17509  * This routine is invoked to remove the memory region that
17510  * provided rpi via a bitmask.
17511  **/
17512 int
17513 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
17514 	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
17515 {
17516 	LPFC_MBOXQ_t *mboxq;
17517 	struct lpfc_hba *phba = ndlp->phba;
17518 	int rc;
17519 
17520 	/* The port is notified of the header region via a mailbox command. */
17521 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17522 	if (!mboxq)
17523 		return -ENOMEM;
17524 
17525 	/* Post all rpi memory regions to the port. */
17526 	lpfc_resume_rpi(mboxq, ndlp);
17527 	if (cmpl) {
17528 		mboxq->mbox_cmpl = cmpl;
17529 		mboxq->context1 = arg;
17530 		mboxq->context2 = ndlp;
17531 	} else
17532 		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17533 	mboxq->vport = ndlp->vport;
17534 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17535 	if (rc == MBX_NOT_FINISHED) {
17536 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
17537 				"2010 Resume RPI Mailbox failed "
17538 				"status %d, mbxStatus x%x\n", rc,
17539 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17540 		mempool_free(mboxq, phba->mbox_mem_pool);
17541 		return -EIO;
17542 	}
17543 	return 0;
17544 }
17545 
17546 /**
17547  * lpfc_sli4_init_vpi - Initialize a vpi with the port
17548  * @vport: Pointer to the vport for which the vpi is being initialized
17549  *
17550  * This routine is invoked to activate a vpi with the port.
17551  *
17552  * Returns:
17553  *    0 success
17554  *    -Evalue otherwise
17555  **/
17556 int
17557 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
17558 {
17559 	LPFC_MBOXQ_t *mboxq;
17560 	int rc = 0;
17561 	int retval = MBX_SUCCESS;
17562 	uint32_t mbox_tmo;
17563 	struct lpfc_hba *phba = vport->phba;
17564 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17565 	if (!mboxq)
17566 		return -ENOMEM;
17567 	lpfc_init_vpi(phba, mboxq, vport->vpi);
17568 	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
17569 	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
17570 	if (rc != MBX_SUCCESS) {
17571 		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
17572 				"2022 INIT VPI Mailbox failed "
17573 				"status %d, mbxStatus x%x\n", rc,
17574 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
17575 		retval = -EIO;
17576 	}
17577 	if (rc != MBX_TIMEOUT)
17578 		mempool_free(mboxq, vport->phba->mbox_mem_pool);
17579 
17580 	return retval;
17581 }
17582 
17583 /**
17584  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
17585  * @phba: pointer to lpfc hba data structure.
17586  * @mboxq: Pointer to mailbox object.
17587  *
17588  * This routine is invoked to manually add a single FCF record. The caller
17589  * must pass a completely initialized FCF_Record.  This routine takes
17590  * care of the nonembedded mailbox operations.
17591  **/
17592 static void
17593 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
17594 {
17595 	void *virt_addr;
17596 	union lpfc_sli4_cfg_shdr *shdr;
17597 	uint32_t shdr_status, shdr_add_status;
17598 
17599 	virt_addr = mboxq->sge_array->addr[0];
17600 	/* The IOCTL status is embedded in the mailbox subheader. */
17601 	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
17602 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17603 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17604 
17605 	if ((shdr_status || shdr_add_status) &&
17606 		(shdr_status != STATUS_FCF_IN_USE))
17607 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17608 			"2558 ADD_FCF_RECORD mailbox failed with "
17609 			"status x%x add_status x%x\n",
17610 			shdr_status, shdr_add_status);
17611 
17612 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
17613 }
17614 
17615 /**
17616  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
17617  * @phba: pointer to lpfc hba data structure.
17618  * @fcf_record:  pointer to the initialized fcf record to add.
17619  *
17620  * This routine is invoked to manually add a single FCF record. The caller
17621  * must pass a completely initialized FCF_Record.  This routine takes
17622  * care of the nonembedded mailbox operations.
17623  **/
17624 int
17625 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
17626 {
17627 	int rc = 0;
17628 	LPFC_MBOXQ_t *mboxq;
17629 	uint8_t *bytep;
17630 	void *virt_addr;
17631 	struct lpfc_mbx_sge sge;
17632 	uint32_t alloc_len, req_len;
17633 	uint32_t fcfindex;
17634 
17635 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17636 	if (!mboxq) {
17637 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17638 			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
17639 		return -ENOMEM;
17640 	}
17641 
17642 	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
17643 		  sizeof(uint32_t);
17644 
17645 	/* Allocate DMA memory and set up the non-embedded mailbox command */
17646 	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
17647 				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
17648 				     req_len, LPFC_SLI4_MBX_NEMBED);
17649 	if (alloc_len < req_len) {
17650 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17651 			"2523 Allocated DMA memory size (x%x) is "
17652 			"less than the requested DMA memory "
17653 			"size (x%x)\n", alloc_len, req_len);
17654 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
17655 		return -ENOMEM;
17656 	}
17657 
17658 	/*
17659 	 * Get the first SGE entry from the non-embedded DMA memory.  This
17660 	 * routine only uses a single SGE.
17661 	 */
17662 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
17663 	virt_addr = mboxq->sge_array->addr[0];
17664 	/*
17665 	 * Configure the FCF record for FCFI 0.  This is the driver's
17666 	 * hardcoded default and gets used in nonFIP mode.
17667 	 */
17668 	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
17669 	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
17670 	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
17671 
17672 	/*
17673 	 * Copy the fcf_index and the FCF Record Data. The data starts after
17674 	 * the FCoE header plus word10. The data copy needs to be endian
17675 	 * correct.
17676 	 */
17677 	bytep += sizeof(uint32_t);
17678 	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
17679 	mboxq->vport = phba->pport;
17680 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
17681 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17682 	if (rc == MBX_NOT_FINISHED) {
17683 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17684 			"2515 ADD_FCF_RECORD mailbox failed with "
17685 			"status 0x%x\n", rc);
17686 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
17687 		rc = -EIO;
17688 	} else
17689 		rc = 0;
17690 
17691 	return rc;
17692 }
17693 
17694 /**
17695  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
17696  * @phba: pointer to lpfc hba data structure.
17697  * @fcf_record:  pointer to the fcf record to write the default data.
17698  * @fcf_index: FCF table entry index.
17699  *
17700  * This routine is invoked to build the driver's default FCF record.  The
17701  * values used are hardcoded.  This routine handles memory initialization.
17702  *
17703  **/
17704 void
17705 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
17706 				struct fcf_record *fcf_record,
17707 				uint16_t fcf_index)
17708 {
17709 	memset(fcf_record, 0, sizeof(struct fcf_record));
17710 	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
17711 	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
17712 	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
17713 	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
17714 	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
17715 	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
17716 	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
17717 	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
17718 	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
17719 	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
17720 	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
17721 	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
17722 	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
17723 	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
17724 	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
17725 	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
17726 		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
17727 	/* Set the VLAN bit map */
17728 	if (phba->valid_vlan) {
17729 		fcf_record->vlan_bitmap[phba->vlan_id / 8]
17730 			= 1 << (phba->vlan_id % 8);
17731 	}
17732 }
17733 
17734 /**
17735  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
17736  * @phba: pointer to lpfc hba data structure.
17737  * @fcf_index: FCF table entry offset.
17738  *
17739  * This routine is invoked to scan the entire FCF table by reading FCF
17740  * record and processing it one at a time starting from the @fcf_index
17741  * for initial FCF discovery or fast FCF failover rediscovery.
17742  *
17743  * Return 0 if the mailbox command is submitted successfully, none 0
17744  * otherwise.
17745  **/
17746 int
17747 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17748 {
17749 	int rc = 0, error;
17750 	LPFC_MBOXQ_t *mboxq;
17751 
17752 	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
17753 	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
17754 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17755 	if (!mboxq) {
17756 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17757 				"2000 Failed to allocate mbox for "
17758 				"READ_FCF cmd\n");
17759 		error = -ENOMEM;
17760 		goto fail_fcf_scan;
17761 	}
17762 	/* Construct the read FCF record mailbox command */
17763 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17764 	if (rc) {
17765 		error = -EINVAL;
17766 		goto fail_fcf_scan;
17767 	}
17768 	/* Issue the mailbox command asynchronously */
17769 	mboxq->vport = phba->pport;
17770 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
17771 
17772 	spin_lock_irq(&phba->hbalock);
17773 	phba->hba_flag |= FCF_TS_INPROG;
17774 	spin_unlock_irq(&phba->hbalock);
17775 
17776 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17777 	if (rc == MBX_NOT_FINISHED)
17778 		error = -EIO;
17779 	else {
17780 		/* Reset eligible FCF count for new scan */
17781 		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
17782 			phba->fcf.eligible_fcf_cnt = 0;
17783 		error = 0;
17784 	}
17785 fail_fcf_scan:
17786 	if (error) {
17787 		if (mboxq)
17788 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
17789 		/* FCF scan failed, clear FCF_TS_INPROG flag */
17790 		spin_lock_irq(&phba->hbalock);
17791 		phba->hba_flag &= ~FCF_TS_INPROG;
17792 		spin_unlock_irq(&phba->hbalock);
17793 	}
17794 	return error;
17795 }
17796 
17797 /**
17798  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
17799  * @phba: pointer to lpfc hba data structure.
17800  * @fcf_index: FCF table entry offset.
17801  *
17802  * This routine is invoked to read an FCF record indicated by @fcf_index
17803  * and to use it for FLOGI roundrobin FCF failover.
17804  *
17805  * Return 0 if the mailbox command is submitted successfully, none 0
17806  * otherwise.
17807  **/
17808 int
17809 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17810 {
17811 	int rc = 0, error;
17812 	LPFC_MBOXQ_t *mboxq;
17813 
17814 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17815 	if (!mboxq) {
17816 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
17817 				"2763 Failed to allocate mbox for "
17818 				"READ_FCF cmd\n");
17819 		error = -ENOMEM;
17820 		goto fail_fcf_read;
17821 	}
17822 	/* Construct the read FCF record mailbox command */
17823 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17824 	if (rc) {
17825 		error = -EINVAL;
17826 		goto fail_fcf_read;
17827 	}
17828 	/* Issue the mailbox command asynchronously */
17829 	mboxq->vport = phba->pport;
17830 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
17831 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17832 	if (rc == MBX_NOT_FINISHED)
17833 		error = -EIO;
17834 	else
17835 		error = 0;
17836 
17837 fail_fcf_read:
17838 	if (error && mboxq)
17839 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
17840 	return error;
17841 }
17842 
17843 /**
17844  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
17845  * @phba: pointer to lpfc hba data structure.
17846  * @fcf_index: FCF table entry offset.
17847  *
17848  * This routine is invoked to read an FCF record indicated by @fcf_index to
17849  * determine whether it's eligible for FLOGI roundrobin failover list.
17850  *
17851  * Return 0 if the mailbox command is submitted successfully, none 0
17852  * otherwise.
17853  **/
17854 int
17855 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
17856 {
17857 	int rc = 0, error;
17858 	LPFC_MBOXQ_t *mboxq;
17859 
17860 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17861 	if (!mboxq) {
17862 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
17863 				"2758 Failed to allocate mbox for "
17864 				"READ_FCF cmd\n");
17865 				error = -ENOMEM;
17866 				goto fail_fcf_read;
17867 	}
17868 	/* Construct the read FCF record mailbox command */
17869 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
17870 	if (rc) {
17871 		error = -EINVAL;
17872 		goto fail_fcf_read;
17873 	}
17874 	/* Issue the mailbox command asynchronously */
17875 	mboxq->vport = phba->pport;
17876 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
17877 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
17878 	if (rc == MBX_NOT_FINISHED)
17879 		error = -EIO;
17880 	else
17881 		error = 0;
17882 
17883 fail_fcf_read:
17884 	if (error && mboxq)
17885 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
17886 	return error;
17887 }
17888 
17889 /**
17890  * lpfc_check_next_fcf_pri_level
17891  * phba pointer to the lpfc_hba struct for this port.
17892  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
17893  * routine when the rr_bmask is empty. The FCF indecies are put into the
17894  * rr_bmask based on their priority level. Starting from the highest priority
17895  * to the lowest. The most likely FCF candidate will be in the highest
17896  * priority group. When this routine is called it searches the fcf_pri list for
17897  * next lowest priority group and repopulates the rr_bmask with only those
17898  * fcf_indexes.
17899  * returns:
17900  * 1=success 0=failure
17901  **/
17902 static int
17903 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
17904 {
17905 	uint16_t next_fcf_pri;
17906 	uint16_t last_index;
17907 	struct lpfc_fcf_pri *fcf_pri;
17908 	int rc;
17909 	int ret = 0;
17910 
17911 	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
17912 			LPFC_SLI4_FCF_TBL_INDX_MAX);
17913 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
17914 			"3060 Last IDX %d\n", last_index);
17915 
17916 	/* Verify the priority list has 2 or more entries */
17917 	spin_lock_irq(&phba->hbalock);
17918 	if (list_empty(&phba->fcf.fcf_pri_list) ||
17919 	    list_is_singular(&phba->fcf.fcf_pri_list)) {
17920 		spin_unlock_irq(&phba->hbalock);
17921 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
17922 			"3061 Last IDX %d\n", last_index);
17923 		return 0; /* Empty rr list */
17924 	}
17925 	spin_unlock_irq(&phba->hbalock);
17926 
17927 	next_fcf_pri = 0;
17928 	/*
17929 	 * Clear the rr_bmask and set all of the bits that are at this
17930 	 * priority.
17931 	 */
17932 	memset(phba->fcf.fcf_rr_bmask, 0,
17933 			sizeof(*phba->fcf.fcf_rr_bmask));
17934 	spin_lock_irq(&phba->hbalock);
17935 	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17936 		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
17937 			continue;
17938 		/*
17939 		 * the 1st priority that has not FLOGI failed
17940 		 * will be the highest.
17941 		 */
17942 		if (!next_fcf_pri)
17943 			next_fcf_pri = fcf_pri->fcf_rec.priority;
17944 		spin_unlock_irq(&phba->hbalock);
17945 		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17946 			rc = lpfc_sli4_fcf_rr_index_set(phba,
17947 						fcf_pri->fcf_rec.fcf_index);
17948 			if (rc)
17949 				return 0;
17950 		}
17951 		spin_lock_irq(&phba->hbalock);
17952 	}
17953 	/*
17954 	 * if next_fcf_pri was not set above and the list is not empty then
17955 	 * we have failed flogis on all of them. So reset flogi failed
17956 	 * and start at the beginning.
17957 	 */
17958 	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
17959 		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
17960 			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
17961 			/*
17962 			 * the 1st priority that has not FLOGI failed
17963 			 * will be the highest.
17964 			 */
17965 			if (!next_fcf_pri)
17966 				next_fcf_pri = fcf_pri->fcf_rec.priority;
17967 			spin_unlock_irq(&phba->hbalock);
17968 			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
17969 				rc = lpfc_sli4_fcf_rr_index_set(phba,
17970 						fcf_pri->fcf_rec.fcf_index);
17971 				if (rc)
17972 					return 0;
17973 			}
17974 			spin_lock_irq(&phba->hbalock);
17975 		}
17976 	} else
17977 		ret = 1;
17978 	spin_unlock_irq(&phba->hbalock);
17979 
17980 	return ret;
17981 }
17982 /**
17983  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
17984  * @phba: pointer to lpfc hba data structure.
17985  *
17986  * This routine is to get the next eligible FCF record index in a round
17987  * robin fashion. If the next eligible FCF record index equals to the
17988  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
17989  * shall be returned, otherwise, the next eligible FCF record's index
17990  * shall be returned.
17991  **/
17992 uint16_t
17993 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
17994 {
17995 	uint16_t next_fcf_index;
17996 
17997 initial_priority:
17998 	/* Search start from next bit of currently registered FCF index */
17999 	next_fcf_index = phba->fcf.current_rec.fcf_indx;
18000 
18001 next_priority:
18002 	/* Determine the next fcf index to check */
18003 	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
18004 	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18005 				       LPFC_SLI4_FCF_TBL_INDX_MAX,
18006 				       next_fcf_index);
18007 
18008 	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
18009 	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18010 		/*
18011 		 * If we have wrapped then we need to clear the bits that
18012 		 * have been tested so that we can detect when we should
18013 		 * change the priority level.
18014 		 */
18015 		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
18016 					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
18017 	}
18018 
18019 
18020 	/* Check roundrobin failover list empty condition */
18021 	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
18022 		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
18023 		/*
18024 		 * If next fcf index is not found check if there are lower
18025 		 * Priority level fcf's in the fcf_priority list.
18026 		 * Set up the rr_bmask with all of the avaiable fcf bits
18027 		 * at that level and continue the selection process.
18028 		 */
18029 		if (lpfc_check_next_fcf_pri_level(phba))
18030 			goto initial_priority;
18031 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18032 				"2844 No roundrobin failover FCF available\n");
18033 		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
18034 			return LPFC_FCOE_FCF_NEXT_NONE;
18035 		else {
18036 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
18037 				"3063 Only FCF available idx %d, flag %x\n",
18038 				next_fcf_index,
18039 			phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
18040 			return next_fcf_index;
18041 		}
18042 	}
18043 
18044 	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
18045 		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
18046 		LPFC_FCF_FLOGI_FAILED) {
18047 		if (list_is_singular(&phba->fcf.fcf_pri_list))
18048 			return LPFC_FCOE_FCF_NEXT_NONE;
18049 
18050 		goto next_priority;
18051 	}
18052 
18053 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18054 			"2845 Get next roundrobin failover FCF (x%x)\n",
18055 			next_fcf_index);
18056 
18057 	return next_fcf_index;
18058 }
18059 
18060 /**
18061  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
18062  * @phba: pointer to lpfc hba data structure.
18063  *
18064  * This routine sets the FCF record index in to the eligible bmask for
18065  * roundrobin failover search. It checks to make sure that the index
18066  * does not go beyond the range of the driver allocated bmask dimension
18067  * before setting the bit.
18068  *
18069  * Returns 0 if the index bit successfully set, otherwise, it returns
18070  * -EINVAL.
18071  **/
18072 int
18073 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
18074 {
18075 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18076 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18077 				"2610 FCF (x%x) reached driver's book "
18078 				"keeping dimension:x%x\n",
18079 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18080 		return -EINVAL;
18081 	}
18082 	/* Set the eligible FCF record index bmask */
18083 	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18084 
18085 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18086 			"2790 Set FCF (x%x) to roundrobin FCF failover "
18087 			"bmask\n", fcf_index);
18088 
18089 	return 0;
18090 }
18091 
18092 /**
18093  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
18094  * @phba: pointer to lpfc hba data structure.
18095  *
18096  * This routine clears the FCF record index from the eligible bmask for
18097  * roundrobin failover search. It checks to make sure that the index
18098  * does not go beyond the range of the driver allocated bmask dimension
18099  * before clearing the bit.
18100  **/
18101 void
18102 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
18103 {
18104 	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
18105 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
18106 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18107 				"2762 FCF (x%x) reached driver's book "
18108 				"keeping dimension:x%x\n",
18109 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
18110 		return;
18111 	}
18112 	/* Clear the eligible FCF record index bmask */
18113 	spin_lock_irq(&phba->hbalock);
18114 	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
18115 				 list) {
18116 		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
18117 			list_del_init(&fcf_pri->list);
18118 			break;
18119 		}
18120 	}
18121 	spin_unlock_irq(&phba->hbalock);
18122 	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
18123 
18124 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18125 			"2791 Clear FCF (x%x) from roundrobin failover "
18126 			"bmask\n", fcf_index);
18127 }
18128 
18129 /**
18130  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
18131  * @phba: pointer to lpfc hba data structure.
18132  *
18133  * This routine is the completion routine for the rediscover FCF table mailbox
18134  * command. If the mailbox command returned failure, it will try to stop the
18135  * FCF rediscover wait timer.
18136  **/
18137 static void
18138 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
18139 {
18140 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18141 	uint32_t shdr_status, shdr_add_status;
18142 
18143 	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18144 
18145 	shdr_status = bf_get(lpfc_mbox_hdr_status,
18146 			     &redisc_fcf->header.cfg_shdr.response);
18147 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
18148 			     &redisc_fcf->header.cfg_shdr.response);
18149 	if (shdr_status || shdr_add_status) {
18150 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
18151 				"2746 Requesting for FCF rediscovery failed "
18152 				"status x%x add_status x%x\n",
18153 				shdr_status, shdr_add_status);
18154 		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
18155 			spin_lock_irq(&phba->hbalock);
18156 			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
18157 			spin_unlock_irq(&phba->hbalock);
18158 			/*
18159 			 * CVL event triggered FCF rediscover request failed,
18160 			 * last resort to re-try current registered FCF entry.
18161 			 */
18162 			lpfc_retry_pport_discovery(phba);
18163 		} else {
18164 			spin_lock_irq(&phba->hbalock);
18165 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
18166 			spin_unlock_irq(&phba->hbalock);
18167 			/*
18168 			 * DEAD FCF event triggered FCF rediscover request
18169 			 * failed, last resort to fail over as a link down
18170 			 * to FCF registration.
18171 			 */
18172 			lpfc_sli4_fcf_dead_failthrough(phba);
18173 		}
18174 	} else {
18175 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
18176 				"2775 Start FCF rediscover quiescent timer\n");
18177 		/*
18178 		 * Start FCF rediscovery wait timer for pending FCF
18179 		 * before rescan FCF record table.
18180 		 */
18181 		lpfc_fcf_redisc_wait_start_timer(phba);
18182 	}
18183 
18184 	mempool_free(mbox, phba->mbox_mem_pool);
18185 }
18186 
18187 /**
18188  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
18189  * @phba: pointer to lpfc hba data structure.
18190  *
18191  * This routine is invoked to request for rediscovery of the entire FCF table
18192  * by the port.
18193  **/
18194 int
18195 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
18196 {
18197 	LPFC_MBOXQ_t *mbox;
18198 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
18199 	int rc, length;
18200 
18201 	/* Cancel retry delay timers to all vports before FCF rediscover */
18202 	lpfc_cancel_all_vport_retry_delay_timer(phba);
18203 
18204 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18205 	if (!mbox) {
18206 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18207 				"2745 Failed to allocate mbox for "
18208 				"requesting FCF rediscover.\n");
18209 		return -ENOMEM;
18210 	}
18211 
18212 	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
18213 		  sizeof(struct lpfc_sli4_cfg_mhdr));
18214 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18215 			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
18216 			 length, LPFC_SLI4_MBX_EMBED);
18217 
18218 	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
18219 	/* Set count to 0 for invalidating the entire FCF database */
18220 	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
18221 
18222 	/* Issue the mailbox command asynchronously */
18223 	mbox->vport = phba->pport;
18224 	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
18225 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
18226 
18227 	if (rc == MBX_NOT_FINISHED) {
18228 		mempool_free(mbox, phba->mbox_mem_pool);
18229 		return -EIO;
18230 	}
18231 	return 0;
18232 }
18233 
18234 /**
18235  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
18236  * @phba: pointer to lpfc hba data structure.
18237  *
18238  * This function is the failover routine as a last resort to the FCF DEAD
18239  * event when driver failed to perform fast FCF failover.
18240  **/
18241 void
18242 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
18243 {
18244 	uint32_t link_state;
18245 
18246 	/*
18247 	 * Last resort as FCF DEAD event failover will treat this as
18248 	 * a link down, but save the link state because we don't want
18249 	 * it to be changed to Link Down unless it is already down.
18250 	 */
18251 	link_state = phba->link_state;
18252 	lpfc_linkdown(phba);
18253 	phba->link_state = link_state;
18254 
18255 	/* Unregister FCF if no devices connected to it */
18256 	lpfc_unregister_unused_fcf(phba);
18257 }
18258 
18259 /**
18260  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
18261  * @phba: pointer to lpfc hba data structure.
18262  * @rgn23_data: pointer to configure region 23 data.
18263  *
18264  * This function gets SLI3 port configure region 23 data through memory dump
18265  * mailbox command. When it successfully retrieves data, the size of the data
18266  * will be returned, otherwise, 0 will be returned.
18267  **/
18268 static uint32_t
18269 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18270 {
18271 	LPFC_MBOXQ_t *pmb = NULL;
18272 	MAILBOX_t *mb;
18273 	uint32_t offset = 0;
18274 	int rc;
18275 
18276 	if (!rgn23_data)
18277 		return 0;
18278 
18279 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18280 	if (!pmb) {
18281 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18282 				"2600 failed to allocate mailbox memory\n");
18283 		return 0;
18284 	}
18285 	mb = &pmb->u.mb;
18286 
18287 	do {
18288 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
18289 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
18290 
18291 		if (rc != MBX_SUCCESS) {
18292 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
18293 					"2601 failed to read config "
18294 					"region 23, rc 0x%x Status 0x%x\n",
18295 					rc, mb->mbxStatus);
18296 			mb->un.varDmp.word_cnt = 0;
18297 		}
18298 		/*
18299 		 * dump mem may return a zero when finished or we got a
18300 		 * mailbox error, either way we are done.
18301 		 */
18302 		if (mb->un.varDmp.word_cnt == 0)
18303 			break;
18304 		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
18305 			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
18306 
18307 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
18308 				       rgn23_data + offset,
18309 				       mb->un.varDmp.word_cnt);
18310 		offset += mb->un.varDmp.word_cnt;
18311 	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
18312 
18313 	mempool_free(pmb, phba->mbox_mem_pool);
18314 	return offset;
18315 }
18316 
18317 /**
18318  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
18319  * @phba: pointer to lpfc hba data structure.
18320  * @rgn23_data: pointer to configure region 23 data.
18321  *
18322  * This function gets SLI4 port configure region 23 data through memory dump
18323  * mailbox command. When it successfully retrieves data, the size of the data
18324  * will be returned, otherwise, 0 will be returned.
18325  **/
18326 static uint32_t
18327 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
18328 {
18329 	LPFC_MBOXQ_t *mboxq = NULL;
18330 	struct lpfc_dmabuf *mp = NULL;
18331 	struct lpfc_mqe *mqe;
18332 	uint32_t data_length = 0;
18333 	int rc;
18334 
18335 	if (!rgn23_data)
18336 		return 0;
18337 
18338 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18339 	if (!mboxq) {
18340 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18341 				"3105 failed to allocate mailbox memory\n");
18342 		return 0;
18343 	}
18344 
18345 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
18346 		goto out;
18347 	mqe = &mboxq->u.mqe;
18348 	mp = (struct lpfc_dmabuf *) mboxq->context1;
18349 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
18350 	if (rc)
18351 		goto out;
18352 	data_length = mqe->un.mb_words[5];
18353 	if (data_length == 0)
18354 		goto out;
18355 	if (data_length > DMP_RGN23_SIZE) {
18356 		data_length = 0;
18357 		goto out;
18358 	}
18359 	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
18360 out:
18361 	mempool_free(mboxq, phba->mbox_mem_pool);
18362 	if (mp) {
18363 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
18364 		kfree(mp);
18365 	}
18366 	return data_length;
18367 }
18368 
18369 /**
18370  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
18371  * @phba: pointer to lpfc hba data structure.
18372  *
18373  * This function read region 23 and parse TLV for port status to
18374  * decide if the user disaled the port. If the TLV indicates the
18375  * port is disabled, the hba_flag is set accordingly.
18376  **/
18377 void
18378 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
18379 {
18380 	uint8_t *rgn23_data = NULL;
18381 	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
18382 	uint32_t offset = 0;
18383 
18384 	/* Get adapter Region 23 data */
18385 	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
18386 	if (!rgn23_data)
18387 		goto out;
18388 
18389 	if (phba->sli_rev < LPFC_SLI_REV4)
18390 		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
18391 	else {
18392 		if_type = bf_get(lpfc_sli_intf_if_type,
18393 				 &phba->sli4_hba.sli_intf);
18394 		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
18395 			goto out;
18396 		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
18397 	}
18398 
18399 	if (!data_size)
18400 		goto out;
18401 
18402 	/* Check the region signature first */
18403 	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
18404 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18405 			"2619 Config region 23 has bad signature\n");
18406 			goto out;
18407 	}
18408 	offset += 4;
18409 
18410 	/* Check the data structure version */
18411 	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
18412 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18413 			"2620 Config region 23 has bad version\n");
18414 		goto out;
18415 	}
18416 	offset += 4;
18417 
18418 	/* Parse TLV entries in the region */
18419 	while (offset < data_size) {
18420 		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
18421 			break;
18422 		/*
18423 		 * If the TLV is not driver specific TLV or driver id is
18424 		 * not linux driver id, skip the record.
18425 		 */
18426 		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
18427 		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
18428 		    (rgn23_data[offset + 3] != 0)) {
18429 			offset += rgn23_data[offset + 1] * 4 + 4;
18430 			continue;
18431 		}
18432 
18433 		/* Driver found a driver specific TLV in the config region */
18434 		sub_tlv_len = rgn23_data[offset + 1] * 4;
18435 		offset += 4;
18436 		tlv_offset = 0;
18437 
18438 		/*
18439 		 * Search for configured port state sub-TLV.
18440 		 */
18441 		while ((offset < data_size) &&
18442 			(tlv_offset < sub_tlv_len)) {
18443 			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
18444 				offset += 4;
18445 				tlv_offset += 4;
18446 				break;
18447 			}
18448 			if (rgn23_data[offset] != PORT_STE_TYPE) {
18449 				offset += rgn23_data[offset + 1] * 4 + 4;
18450 				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
18451 				continue;
18452 			}
18453 
18454 			/* This HBA contains PORT_STE configured */
18455 			if (!rgn23_data[offset + 2])
18456 				phba->hba_flag |= LINK_DISABLED;
18457 
18458 			goto out;
18459 		}
18460 	}
18461 
18462 out:
18463 	kfree(rgn23_data);
18464 	return;
18465 }
18466 
18467 /**
18468  * lpfc_wr_object - write an object to the firmware
18469  * @phba: HBA structure that indicates port to create a queue on.
18470  * @dmabuf_list: list of dmabufs to write to the port.
18471  * @size: the total byte value of the objects to write to the port.
18472  * @offset: the current offset to be used to start the transfer.
18473  *
18474  * This routine will create a wr_object mailbox command to send to the port.
18475  * the mailbox command will be constructed using the dma buffers described in
18476  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
18477  * BDEs that the imbedded mailbox can support. The @offset variable will be
18478  * used to indicate the starting offset of the transfer and will also return
18479  * the offset after the write object mailbox has completed. @size is used to
18480  * determine the end of the object and whether the eof bit should be set.
18481  *
18482  * Return 0 is successful and offset will contain the the new offset to use
18483  * for the next write.
18484  * Return negative value for error cases.
18485  **/
18486 int
18487 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
18488 	       uint32_t size, uint32_t *offset)
18489 {
18490 	struct lpfc_mbx_wr_object *wr_object;
18491 	LPFC_MBOXQ_t *mbox;
18492 	int rc = 0, i = 0;
18493 	uint32_t shdr_status, shdr_add_status;
18494 	uint32_t mbox_tmo;
18495 	union lpfc_sli4_cfg_shdr *shdr;
18496 	struct lpfc_dmabuf *dmabuf;
18497 	uint32_t written = 0;
18498 
18499 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18500 	if (!mbox)
18501 		return -ENOMEM;
18502 
18503 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
18504 			LPFC_MBOX_OPCODE_WRITE_OBJECT,
18505 			sizeof(struct lpfc_mbx_wr_object) -
18506 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
18507 
18508 	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
18509 	wr_object->u.request.write_offset = *offset;
18510 	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
18511 	wr_object->u.request.object_name[0] =
18512 		cpu_to_le32(wr_object->u.request.object_name[0]);
18513 	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
18514 	list_for_each_entry(dmabuf, dmabuf_list, list) {
18515 		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
18516 			break;
18517 		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
18518 		wr_object->u.request.bde[i].addrHigh =
18519 			putPaddrHigh(dmabuf->phys);
18520 		if (written + SLI4_PAGE_SIZE >= size) {
18521 			wr_object->u.request.bde[i].tus.f.bdeSize =
18522 				(size - written);
18523 			written += (size - written);
18524 			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
18525 		} else {
18526 			wr_object->u.request.bde[i].tus.f.bdeSize =
18527 				SLI4_PAGE_SIZE;
18528 			written += SLI4_PAGE_SIZE;
18529 		}
18530 		i++;
18531 	}
18532 	wr_object->u.request.bde_count = i;
18533 	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
18534 	if (!phba->sli4_hba.intr_enable)
18535 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18536 	else {
18537 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18538 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18539 	}
18540 	/* The IOCTL status is embedded in the mailbox subheader. */
18541 	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
18542 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18543 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18544 	if (rc != MBX_TIMEOUT)
18545 		mempool_free(mbox, phba->mbox_mem_pool);
18546 	if (shdr_status || shdr_add_status || rc) {
18547 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
18548 				"3025 Write Object mailbox failed with "
18549 				"status x%x add_status x%x, mbx status x%x\n",
18550 				shdr_status, shdr_add_status, rc);
18551 		rc = -ENXIO;
18552 	} else
18553 		*offset += wr_object->u.response.actual_write_length;
18554 	return rc;
18555 }
18556 
18557 /**
18558  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
18559  * @vport: pointer to vport data structure.
18560  *
18561  * This function iterate through the mailboxq and clean up all REG_LOGIN
18562  * and REG_VPI mailbox commands associated with the vport. This function
18563  * is called when driver want to restart discovery of the vport due to
18564  * a Clear Virtual Link event.
18565  **/
18566 void
18567 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
18568 {
18569 	struct lpfc_hba *phba = vport->phba;
18570 	LPFC_MBOXQ_t *mb, *nextmb;
18571 	struct lpfc_dmabuf *mp;
18572 	struct lpfc_nodelist *ndlp;
18573 	struct lpfc_nodelist *act_mbx_ndlp = NULL;
18574 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
18575 	LIST_HEAD(mbox_cmd_list);
18576 	uint8_t restart_loop;
18577 
18578 	/* Clean up internally queued mailbox commands with the vport */
18579 	spin_lock_irq(&phba->hbalock);
18580 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
18581 		if (mb->vport != vport)
18582 			continue;
18583 
18584 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18585 			(mb->u.mb.mbxCommand != MBX_REG_VPI))
18586 			continue;
18587 
18588 		list_del(&mb->list);
18589 		list_add_tail(&mb->list, &mbox_cmd_list);
18590 	}
18591 	/* Clean up active mailbox command with the vport */
18592 	mb = phba->sli.mbox_active;
18593 	if (mb && (mb->vport == vport)) {
18594 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
18595 			(mb->u.mb.mbxCommand == MBX_REG_VPI))
18596 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18597 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18598 			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
18599 			/* Put reference count for delayed processing */
18600 			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
18601 			/* Unregister the RPI when mailbox complete */
18602 			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18603 		}
18604 	}
18605 	/* Cleanup any mailbox completions which are not yet processed */
18606 	do {
18607 		restart_loop = 0;
18608 		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
18609 			/*
18610 			 * If this mailox is already processed or it is
18611 			 * for another vport ignore it.
18612 			 */
18613 			if ((mb->vport != vport) ||
18614 				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
18615 				continue;
18616 
18617 			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
18618 				(mb->u.mb.mbxCommand != MBX_REG_VPI))
18619 				continue;
18620 
18621 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
18622 			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18623 				ndlp = (struct lpfc_nodelist *)mb->context2;
18624 				/* Unregister the RPI when mailbox complete */
18625 				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
18626 				restart_loop = 1;
18627 				spin_unlock_irq(&phba->hbalock);
18628 				spin_lock(shost->host_lock);
18629 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18630 				spin_unlock(shost->host_lock);
18631 				spin_lock_irq(&phba->hbalock);
18632 				break;
18633 			}
18634 		}
18635 	} while (restart_loop);
18636 
18637 	spin_unlock_irq(&phba->hbalock);
18638 
18639 	/* Release the cleaned-up mailbox commands */
18640 	while (!list_empty(&mbox_cmd_list)) {
18641 		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
18642 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
18643 			mp = (struct lpfc_dmabuf *) (mb->context1);
18644 			if (mp) {
18645 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
18646 				kfree(mp);
18647 			}
18648 			ndlp = (struct lpfc_nodelist *) mb->context2;
18649 			mb->context2 = NULL;
18650 			if (ndlp) {
18651 				spin_lock(shost->host_lock);
18652 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18653 				spin_unlock(shost->host_lock);
18654 				lpfc_nlp_put(ndlp);
18655 			}
18656 		}
18657 		mempool_free(mb, phba->mbox_mem_pool);
18658 	}
18659 
18660 	/* Release the ndlp with the cleaned-up active mailbox command */
18661 	if (act_mbx_ndlp) {
18662 		spin_lock(shost->host_lock);
18663 		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
18664 		spin_unlock(shost->host_lock);
18665 		lpfc_nlp_put(act_mbx_ndlp);
18666 	}
18667 }
18668 
18669 /**
18670  * lpfc_drain_txq - Drain the txq
18671  * @phba: Pointer to HBA context object.
18672  *
18673  * This function attempt to submit IOCBs on the txq
18674  * to the adapter.  For SLI4 adapters, the txq contains
18675  * ELS IOCBs that have been deferred because the there
18676  * are no SGLs.  This congestion can occur with large
18677  * vport counts during node discovery.
18678  **/
18679 
18680 uint32_t
18681 lpfc_drain_txq(struct lpfc_hba *phba)
18682 {
18683 	LIST_HEAD(completions);
18684 	struct lpfc_sli_ring *pring;
18685 	struct lpfc_iocbq *piocbq = NULL;
18686 	unsigned long iflags = 0;
18687 	char *fail_msg = NULL;
18688 	struct lpfc_sglq *sglq;
18689 	union lpfc_wqe128 wqe128;
18690 	union lpfc_wqe *wqe = (union lpfc_wqe *) &wqe128;
18691 	uint32_t txq_cnt = 0;
18692 
18693 	pring = lpfc_phba_elsring(phba);
18694 
18695 	spin_lock_irqsave(&pring->ring_lock, iflags);
18696 	list_for_each_entry(piocbq, &pring->txq, list) {
18697 		txq_cnt++;
18698 	}
18699 
18700 	if (txq_cnt > pring->txq_max)
18701 		pring->txq_max = txq_cnt;
18702 
18703 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
18704 
18705 	while (!list_empty(&pring->txq)) {
18706 		spin_lock_irqsave(&pring->ring_lock, iflags);
18707 
18708 		piocbq = lpfc_sli_ringtx_get(phba, pring);
18709 		if (!piocbq) {
18710 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18711 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18712 				"2823 txq empty and txq_cnt is %d\n ",
18713 				txq_cnt);
18714 			break;
18715 		}
18716 		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
18717 		if (!sglq) {
18718 			__lpfc_sli_ringtx_put(phba, pring, piocbq);
18719 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18720 			break;
18721 		}
18722 		txq_cnt--;
18723 
18724 		/* The xri and iocb resources secured,
18725 		 * attempt to issue request
18726 		 */
18727 		piocbq->sli4_lxritag = sglq->sli4_lxritag;
18728 		piocbq->sli4_xritag = sglq->sli4_xritag;
18729 		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
18730 			fail_msg = "to convert bpl to sgl";
18731 		else if (lpfc_sli4_iocb2wqe(phba, piocbq, wqe))
18732 			fail_msg = "to convert iocb to wqe";
18733 		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, wqe))
18734 			fail_msg = " - Wq is full";
18735 		else
18736 			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);
18737 
18738 		if (fail_msg) {
18739 			/* Failed means we can't issue and need to cancel */
18740 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
18741 					"2822 IOCB failed %s iotag 0x%x "
18742 					"xri 0x%x\n",
18743 					fail_msg,
18744 					piocbq->iotag, piocbq->sli4_xritag);
18745 			list_add_tail(&piocbq->list, &completions);
18746 		}
18747 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
18748 	}
18749 
18750 	/* Cancel all the IOCBs that cannot be issued */
18751 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
18752 				IOERR_SLI_ABORTED);
18753 
18754 	return txq_cnt;
18755 }
18756 
18757 /**
18758  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
18759  * @phba: Pointer to HBA context object.
18760  * @pwqe: Pointer to command WQE.
18761  * @sglq: Pointer to the scatter gather queue object.
18762  *
18763  * This routine converts the bpl or bde that is in the WQE
18764  * to a sgl list for the sli4 hardware. The physical address
18765  * of the bpl/bde is converted back to a virtual address.
18766  * If the WQE contains a BPL then the list of BDE's is
18767  * converted to sli4_sge's. If the WQE contains a single
18768  * BDE then it is converted to a single sli_sge.
18769  * The WQE is still in cpu endianness so the contents of
18770  * the bpl can be used without byte swapping.
18771  *
18772  * Returns valid XRI = Success, NO_XRI = Failure.
18773  */
18774 static uint16_t
18775 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
18776 		 struct lpfc_sglq *sglq)
18777 {
18778 	uint16_t xritag = NO_XRI;
18779 	struct ulp_bde64 *bpl = NULL;
18780 	struct ulp_bde64 bde;
18781 	struct sli4_sge *sgl  = NULL;
18782 	struct lpfc_dmabuf *dmabuf;
18783 	union lpfc_wqe *wqe;
18784 	int numBdes = 0;
18785 	int i = 0;
18786 	uint32_t offset = 0; /* accumulated offset in the sg request list */
18787 	int inbound = 0; /* number of sg reply entries inbound from firmware */
18788 	uint32_t cmd;
18789 
18790 	if (!pwqeq || !sglq)
18791 		return xritag;
18792 
18793 	sgl  = (struct sli4_sge *)sglq->sgl;
18794 	wqe = &pwqeq->wqe;
18795 	pwqeq->iocb.ulpIoTag = pwqeq->iotag;
18796 
18797 	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
18798 	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
18799 		return sglq->sli4_xritag;
18800 	numBdes = pwqeq->rsvd2;
18801 	if (numBdes) {
18802 		/* The addrHigh and addrLow fields within the WQE
18803 		 * have not been byteswapped yet so there is no
18804 		 * need to swap them back.
18805 		 */
18806 		if (pwqeq->context3)
18807 			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
18808 		else
18809 			return xritag;
18810 
18811 		bpl  = (struct ulp_bde64 *)dmabuf->virt;
18812 		if (!bpl)
18813 			return xritag;
18814 
18815 		for (i = 0; i < numBdes; i++) {
18816 			/* Should already be byte swapped. */
18817 			sgl->addr_hi = bpl->addrHigh;
18818 			sgl->addr_lo = bpl->addrLow;
18819 
18820 			sgl->word2 = le32_to_cpu(sgl->word2);
18821 			if ((i+1) == numBdes)
18822 				bf_set(lpfc_sli4_sge_last, sgl, 1);
18823 			else
18824 				bf_set(lpfc_sli4_sge_last, sgl, 0);
18825 			/* swap the size field back to the cpu so we
18826 			 * can assign it to the sgl.
18827 			 */
18828 			bde.tus.w = le32_to_cpu(bpl->tus.w);
18829 			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
18830 			/* The offsets in the sgl need to be accumulated
18831 			 * separately for the request and reply lists.
18832 			 * The request is always first, the reply follows.
18833 			 */
18834 			switch (cmd) {
18835 			case CMD_GEN_REQUEST64_WQE:
18836 				/* add up the reply sg entries */
18837 				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
18838 					inbound++;
18839 				/* first inbound? reset the offset */
18840 				if (inbound == 1)
18841 					offset = 0;
18842 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
18843 				bf_set(lpfc_sli4_sge_type, sgl,
18844 					LPFC_SGE_TYPE_DATA);
18845 				offset += bde.tus.f.bdeSize;
18846 				break;
18847 			case CMD_FCP_TRSP64_WQE:
18848 				bf_set(lpfc_sli4_sge_offset, sgl, 0);
18849 				bf_set(lpfc_sli4_sge_type, sgl,
18850 					LPFC_SGE_TYPE_DATA);
18851 				break;
18852 			case CMD_FCP_TSEND64_WQE:
18853 			case CMD_FCP_TRECEIVE64_WQE:
18854 				bf_set(lpfc_sli4_sge_type, sgl,
18855 					bpl->tus.f.bdeFlags);
18856 				if (i < 3)
18857 					offset = 0;
18858 				else
18859 					offset += bde.tus.f.bdeSize;
18860 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
18861 				break;
18862 			}
18863 			sgl->word2 = cpu_to_le32(sgl->word2);
18864 			bpl++;
18865 			sgl++;
18866 		}
18867 	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
18868 		/* The addrHigh and addrLow fields of the BDE have not
18869 		 * been byteswapped yet so they need to be swapped
18870 		 * before putting them in the sgl.
18871 		 */
18872 		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
18873 		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
18874 		sgl->word2 = le32_to_cpu(sgl->word2);
18875 		bf_set(lpfc_sli4_sge_last, sgl, 1);
18876 		sgl->word2 = cpu_to_le32(sgl->word2);
18877 		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
18878 	}
18879 	return sglq->sli4_xritag;
18880 }
18881 
18882 /**
18883  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
18884  * @phba: Pointer to HBA context object.
18885  * @ring_number: Base sli ring number
18886  * @pwqe: Pointer to command WQE.
18887  **/
18888 int
18889 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
18890 		    struct lpfc_iocbq *pwqe)
18891 {
18892 	union lpfc_wqe *wqe = &pwqe->wqe;
18893 	struct lpfc_nvmet_rcv_ctx *ctxp;
18894 	struct lpfc_queue *wq;
18895 	struct lpfc_sglq *sglq;
18896 	struct lpfc_sli_ring *pring;
18897 	unsigned long iflags;
18898 	uint32_t ret = 0;
18899 
18900 	/* NVME_LS and NVME_LS ABTS requests. */
18901 	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
18902 		pring =  phba->sli4_hba.nvmels_wq->pring;
18903 		spin_lock_irqsave(&pring->ring_lock, iflags);
18904 		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
18905 		if (!sglq) {
18906 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18907 			return WQE_BUSY;
18908 		}
18909 		pwqe->sli4_lxritag = sglq->sli4_lxritag;
18910 		pwqe->sli4_xritag = sglq->sli4_xritag;
18911 		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
18912 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18913 			return WQE_ERROR;
18914 		}
18915 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
18916 		       pwqe->sli4_xritag);
18917 		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
18918 		if (ret) {
18919 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18920 			return ret;
18921 		}
18922 
18923 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18924 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
18925 		return 0;
18926 	}
18927 
18928 	/* NVME_FCREQ and NVME_ABTS requests */
18929 	if (pwqe->iocb_flag & LPFC_IO_NVME) {
18930 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
18931 		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
18932 
18933 		spin_lock_irqsave(&pring->ring_lock, iflags);
18934 		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
18935 		bf_set(wqe_cqid, &wqe->generic.wqe_com,
18936 		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
18937 		ret = lpfc_sli4_wq_put(wq, wqe);
18938 		if (ret) {
18939 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18940 			return ret;
18941 		}
18942 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18943 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
18944 		return 0;
18945 	}
18946 
18947 	/* NVMET requests */
18948 	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
18949 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
18950 		pring = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx]->pring;
18951 
18952 		spin_lock_irqsave(&pring->ring_lock, iflags);
18953 		ctxp = pwqe->context2;
18954 		sglq = ctxp->ctxbuf->sglq;
18955 		if (pwqe->sli4_xritag ==  NO_XRI) {
18956 			pwqe->sli4_lxritag = sglq->sli4_lxritag;
18957 			pwqe->sli4_xritag = sglq->sli4_xritag;
18958 		}
18959 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
18960 		       pwqe->sli4_xritag);
18961 		wq = phba->sli4_hba.nvme_wq[pwqe->hba_wqidx];
18962 		bf_set(wqe_cqid, &wqe->generic.wqe_com,
18963 		      phba->sli4_hba.nvme_cq[pwqe->hba_wqidx]->queue_id);
18964 		ret = lpfc_sli4_wq_put(wq, wqe);
18965 		if (ret) {
18966 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
18967 			return ret;
18968 		}
18969 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
18970 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
18971 		return 0;
18972 	}
18973 	return WQE_ERROR;
18974 }
18975